61#include "llvm/IR/IntrinsicsAArch64.h"
96#define DEBUG_TYPE "aarch64-lower"
107 cl::desc(
"Allow AArch64 Local Dynamic TLS code generation"),
112 cl::desc(
"Enable AArch64 logical imm instruction "
126 "Expected legal vector type!");
176 if (Subtarget->
hasSVE()) {
202 if (useSVEForFixedLengthVectors()) {
204 if (useSVEForFixedLengthVectorVT(VT))
208 if (useSVEForFixedLengthVectorVT(VT))
917 if (Subtarget->
hasSVE()) {
935 if (VT.getScalarType() ==
MVT::i1) {
960 if (useSVEForFixedLengthVectors()) {
962 if (useSVEForFixedLengthVectorVT(VT))
963 addTypeForFixedLengthSVE(VT);
965 if (useSVEForFixedLengthVectorVT(VT))
966 addTypeForFixedLengthSVE(VT);
1050 for (
unsigned Opcode :
1063void AArch64TargetLowering::addTypeForFixedLengthSVE(
MVT VT) {
1081void AArch64TargetLowering::addDRTypeForNEON(
MVT VT) {
1086void AArch64TargetLowering::addQRTypeForNEON(
MVT VT) {
1105 uint64_t Mask = ((uint64_t)(-1LL) >> (64 -
Size)),
OrigMask = Mask;
1109 if (Imm == 0 || Imm == Mask ||
1113 unsigned EltSize =
Size;
1163 while (EltSize <
Size) {
1170 "demanded bits should never be altered");
1174 EVT VT = Op.getValueType();
1181 New =
TLO.DAG.getNode(Op.getOpcode(),
DL, VT, Op.getOperand(0),
1192 return TLO.CombineTo(Op, New);
1205 EVT VT = Op.getValueType();
1211 "i32 or i64 is expected after legalization.");
1218 switch (Op.getOpcode()) {
1222 NewOpc =
Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
1225 NewOpc =
Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
1228 NewOpc =
Size == 32 ? AArch64::EORWri : AArch64::EORXri;
1234 uint64_t Imm =
C->getZExtValue();
1243 switch (Op.getOpcode()) {
1267 case Intrinsic::aarch64_ldaxr:
1268 case Intrinsic::aarch64_ldxr: {
1284 case Intrinsic::aarch64_neon_umaxv:
1285 case Intrinsic::aarch64_neon_uminv: {
1290 MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
1347 Ty.getSizeInBytes() != 16 ||
1370#define MAKE_CASE(V) \
1634 Register DestReg =
MI.getOperand(0).getReg();
1637 unsigned CondCode =
MI.getOperand(3).getImm();
1648 EndBB->transferSuccessorsAndUpdatePHIs(
MBB);
1660 EndBB->addLiveIn(AArch64::NZCV);
1669 MI.eraseFromParent();
1677 "SEH does not use catchret!");
1683 switch (
MI.getOpcode()) {
1690 case AArch64::F128CSEL:
1693 case TargetOpcode::STACKMAP:
1694 case TargetOpcode::PATCHPOINT:
1697 case AArch64::CATCHRET:
1866 bool IsLegal = (
C >> 12 == 0) || ((
C & 0xFFFULL) == 0 &&
C >> 24 == 0);
1868 <<
" legal: " << (
IsLegal ?
"yes\n" :
"no\n"));
1892 assert(VT !=
MVT::f16 &&
"Lowering of strict fp16 not yet implemented");
1920 if (
isCMN(RHS, CC)) {
1924 }
else if (
isCMN(LHS, CC)) {
2009 unsigned Opcode = 0;
2054 unsigned Depth = 0) {
2163 assert(
ValidL &&
"Valid conjunction/disjunction tree");
2170 assert(
ValidR &&
"Valid conjunction/disjunction tree");
2202 assert(Opcode ==
ISD::AND &&
"Valid conjunction/disjunction tree");
2203 assert(!Negate &&
"Valid conjunction/disjunction tree");
2247 uint64_t Mask =
MaskCst->getZExtValue();
2248 return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
2254 if (!Op.hasOneUse())
2260 unsigned Opc = Op.getOpcode();
2263 uint64_t Shift =
ShiftCst->getZExtValue();
2265 return (Shift <= 4) ? 2 : 1;
2266 EVT VT = Op.getValueType();
2279 uint64_t
C =
RHSC->getZExtValue();
2287 if ((VT ==
MVT::i32 &&
C != 0x80000000 &&
2289 (VT ==
MVT::i64 &&
C != 0x80000000ULL &&
2389 if (!Cmp && (
RHSC->isNullValue() ||
RHSC->isOne())) {
2405static std::pair<SDValue, SDValue>
2408 "Unsupported value type");
2411 SDValue LHS = Op.getOperand(0);
2412 SDValue RHS = Op.getOperand(1);
2414 switch (Op.getOpcode()) {
2437 bool IsSigned = Op.getOpcode() ==
ISD::SMULO;
2438 if (Op.getValueType() ==
MVT::i32) {
2486 assert(Op.getValueType() ==
MVT::i64 &&
"Expected an i64 value type");
2529 std::tie(Result, Chain) =
makeLibCall(DAG, Call,
Op.getValueType(), Ops,
2592 if (
CTVal->isAllOnesValue() &&
CFVal->isNullValue()) {
2599 if (
CTVal->isNullValue() &&
CFVal->isAllOnesValue()) {
2615 EVT VT = Op.getValueType();
2625 switch (Op.getOpcode()) {
2645 return DAG.
getNode(Opc,
SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2646 return DAG.
getNode(Opc,
SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2699 unsigned PrfOp = (IsWrite << 4) |
2714 return LowerF128Call(Op, DAG,
LC);
2725 if (useSVEForFixedLengthVectorVT(
SrcVT))
2752 EVT InVT =
Op.getOperand(0).getValueType();
2753 EVT VT =
Op.getValueType();
2762 Op.getOpcode(), dl,
Op.getValueType(),
2769 DAG.
getNode(
Op.getOpcode(), dl,
InVT.changeVectorElementTypeToInteger(),
2780 return DAG.
getNode(
Op.getOpcode(), dl, VT, Ext);
2792 if (
SrcVal.getValueType().isVector())
2793 return LowerVectorFP_TO_INT(Op, DAG);
2797 assert(!
IsStrict &&
"Lowering of strict fp16 not yet implemented");
2800 Op.getOpcode(), dl,
Op.getValueType(),
2816 return LowerF128Call(Op, DAG,
LC);
2823 EVT VT = Op.getValueType();
2825 SDValue In = Op.getOperand(0);
2831 InVT.getVectorNumElements());
2841 return DAG.
getNode(Op.getOpcode(), dl, VT, In);
2849 if (
Op.getValueType().isVector())
2858 assert(!
IsStrict &&
"Lowering of strict fp16 not yet implemented");
2882 return LowerF128Call(Op, DAG,
LC);
2891 EVT ArgVT =
Arg.getValueType();
2899 Entry.IsSExt =
false;
2900 Entry.IsZExt =
false;
2901 Args.push_back(Entry);
2904 : RTLIB::SINCOS_STRET_F32;
2936 if (
OrigVT.getSizeInBits() >= 64)
2939 assert(
OrigVT.isSimple() &&
"Expecting a simple value type");
2971 EVT VT =
N->getValueType(0);
3003 EVT VT =
N->getValueType(0);
3030 unsigned Opcode =
N->getOpcode();
3033 SDNode *
N1 =
N->getOperand(1).getNode();
3041 unsigned Opcode =
N->getOpcode();
3044 SDNode *
N1 =
N->getOperand(1).getNode();
3077 EVT VT = Op.getValueType();
3079 "unexpected type for custom-lowering ISD::MUL");
3080 SDNode *N0 = Op.getOperand(0).getNode();
3081 SDNode *
N1 = Op.getOperand(1).getNode();
3127 "unexpected types for extended operands to VMULL");
3149SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(
SDValue Op,
3155 case Intrinsic::thread_pointer: {
3159 case Intrinsic::aarch64_neon_abs: {
3160 EVT Ty =
Op.getValueType();
3166 }
else if (Ty.isVector() && Ty.isInteger() &&
isTypeLegal(Ty)) {
3172 case Intrinsic::aarch64_neon_smax:
3174 Op.getOperand(1),
Op.getOperand(2));
3175 case Intrinsic::aarch64_neon_umax:
3177 Op.getOperand(1),
Op.getOperand(2));
3178 case Intrinsic::aarch64_neon_smin:
3180 Op.getOperand(1),
Op.getOperand(2));
3181 case Intrinsic::aarch64_neon_umin:
3183 Op.getOperand(1),
Op.getOperand(2));
3185 case Intrinsic::aarch64_sve_sunpkhi:
3188 case Intrinsic::aarch64_sve_sunpklo:
3191 case Intrinsic::aarch64_sve_uunpkhi:
3194 case Intrinsic::aarch64_sve_uunpklo:
3197 case Intrinsic::aarch64_sve_clasta_n:
3199 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
3200 case Intrinsic::aarch64_sve_clastb_n:
3202 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
3203 case Intrinsic::aarch64_sve_lasta:
3205 Op.getOperand(1),
Op.getOperand(2));
3206 case Intrinsic::aarch64_sve_lastb:
3208 Op.getOperand(1),
Op.getOperand(2));
3209 case Intrinsic::aarch64_sve_rev:
3212 case Intrinsic::aarch64_sve_tbl:
3214 Op.getOperand(1),
Op.getOperand(2));
3215 case Intrinsic::aarch64_sve_trn1:
3217 Op.getOperand(1),
Op.getOperand(2));
3218 case Intrinsic::aarch64_sve_trn2:
3220 Op.getOperand(1),
Op.getOperand(2));
3221 case Intrinsic::aarch64_sve_uzp1:
3223 Op.getOperand(1),
Op.getOperand(2));
3224 case Intrinsic::aarch64_sve_uzp2:
3226 Op.getOperand(1),
Op.getOperand(2));
3227 case Intrinsic::aarch64_sve_zip1:
3229 Op.getOperand(1),
Op.getOperand(2));
3230 case Intrinsic::aarch64_sve_zip2:
3232 Op.getOperand(1),
Op.getOperand(2));
3233 case Intrinsic::aarch64_sve_ptrue:
3236 case Intrinsic::aarch64_sve_dupq_lane:
3237 return LowerDUPQLane(Op, DAG);
3238 case Intrinsic::aarch64_sve_convert_from_svbool:
3241 case Intrinsic::aarch64_sve_convert_to_svbool: {
3243 EVT InVT =
Op.getOperand(1).getValueType();
3247 return Op.getOperand(1);
3257 case Intrinsic::aarch64_sve_insr: {
3264 Op.getOperand(1), Scalar);
3267 case Intrinsic::localaddress: {
3270 unsigned Reg =
RegInfo->getLocalAddressRegister(MF);
3272 Op.getSimpleValueType());
3275 case Intrinsic::eh_recoverfp: {
3285 "llvm.eh.recoverfp must take a function as the first argument");
3289 case Intrinsic::aarch64_neon_vsri:
3290 case Intrinsic::aarch64_neon_vsli: {
3291 EVT Ty =
Op.getValueType();
3296 assert(
Op.getConstantOperandVal(3) <= Ty.getScalarSizeInBits());
3300 return DAG.
getNode(Opcode, dl, Ty,
Op.getOperand(1),
Op.getOperand(2),
3304 case Intrinsic::aarch64_neon_srhadd:
3305 case Intrinsic::aarch64_neon_urhadd: {
3308 return DAG.
getNode(Opcode, dl,
Op.getValueType(),
Op.getOperand(1),
3314bool AArch64TargetLowering::isVectorLoadExtDesirable(
SDValue ExtVal)
const {
3315 return ExtVal.getValueType().isScalableVector();
3336 {Undef, Undef, Undef, Undef});
3347 ST->getBasePtr(), ST->getMemOperand());
3365 if (useSVEForFixedLengthVectorVT(VT))
3366 return LowerFixedLengthVectorStoreToSVE(Op, DAG);
3368 unsigned AS =
StoreNode->getAddressSpace();
3370 if (Alignment <
MemVT.getStoreSize() &&
3383 if (
StoreNode->isNonTemporal() &&
MemVT.getSizeInBits() == 256u &&
3384 MemVT.getVectorElementCount().Min % 2u == 0 &&
3385 ((
MemVT.getScalarSizeInBits() == 8u ||
3386 MemVT.getScalarSizeInBits() == 16u ||
3387 MemVT.getScalarSizeInBits() == 32u ||
3388 MemVT.getScalarSizeInBits() == 64u))) {
3400 {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
3414 {StoreNode->getChain(), Lo, Hi, StoreNode->getBasePtr()},
3427 switch (Op.getOpcode()) {
3434 return LowerGlobalAddress(Op, DAG);
3436 return LowerGlobalTLSAddress(Op, DAG);
3440 return LowerSETCC(Op, DAG);
3442 return LowerBR_CC(Op, DAG);
3444 return LowerSELECT(Op, DAG);
3446 return LowerSELECT_CC(Op, DAG);
3448 return LowerJumpTable(Op, DAG);
3450 return LowerBR_JT(Op, DAG);
3452 return LowerConstantPool(Op, DAG);
3454 return LowerBlockAddress(Op, DAG);
3456 return LowerVASTART(Op, DAG);
3458 return LowerVACOPY(Op, DAG);
3460 return LowerVAARG(Op, DAG);
3474 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3476 return LowerF128Call(Op, DAG, RTLIB::ADD_F128);
3478 return LowerF128Call(Op, DAG, RTLIB::SUB_F128);
3480 return LowerF128Call(Op, DAG, RTLIB::MUL_F128);
3484 return LowerF128Call(Op, DAG, RTLIB::DIV_F128);
3487 return LowerFP_ROUND(Op, DAG);
3489 return LowerFP_EXTEND(Op, DAG);
3491 return LowerFRAMEADDR(Op, DAG);
3493 return LowerSPONENTRY(Op, DAG);
3495 return LowerRETURNADDR(Op, DAG);
3497 return LowerADDROFRETURNADDR(Op, DAG);
3499 return LowerINSERT_VECTOR_ELT(Op, DAG);
3501 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3503 return LowerBUILD_VECTOR(Op, DAG);
3505 return LowerVECTOR_SHUFFLE(Op, DAG);
3507 return LowerSPLAT_VECTOR(Op, DAG);
3509 return LowerEXTRACT_SUBVECTOR(Op, DAG);
3511 return LowerINSERT_SUBVECTOR(Op, DAG);
3527 return LowerVectorSRA_SRL_SHL(Op, DAG);
3529 return LowerShiftLeftParts(Op, DAG);
3532 return LowerShiftRightParts(Op, DAG);
3534 return LowerCTPOP(Op, DAG);
3536 return LowerFCOPYSIGN(Op, DAG);
3538 return LowerVectorOR(Op, DAG);
3547 return LowerINT_TO_FP(Op, DAG);
3552 return LowerFP_TO_INT(Op, DAG);
3554 return LowerFSINCOS(Op, DAG);
3556 return LowerFLT_ROUNDS_(Op, DAG);
3560 return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3562 return LowerSTORE(Op, DAG);
3570 return LowerVECREDUCE(Op, DAG);
3572 return LowerATOMIC_LOAD_SUB(Op, DAG);
3574 return LowerATOMIC_LOAD_AND(Op, DAG);
3576 return LowerDYNAMIC_STACKALLOC(Op, DAG);
3578 return LowerVSCALE(Op, DAG);
3580 return LowerTRUNCATE(Op, DAG);
3582 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3583 return LowerFixedLengthVectorLoadToSVE(Op, DAG);
3586 if (useSVEForFixedLengthVectorVT(Op.getValueType()))
3592bool AArch64TargetLowering::useSVEForFixedLengthVectors()
const {
3597bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
EVT VT)
const {
3598 if (!useSVEForFixedLengthVectors())
3645 bool IsVarArg)
const {
3682SDValue AArch64TargetLowering::LowerFormalArguments(
3702 unsigned NumArgs = Ins.size();
3705 for (
unsigned i = 0;
i != NumArgs; ++
i) {
3706 MVT ValVT = Ins[
i].VT;
3707 if (Ins[
i].isOrigArg()) {
3724 assert(!Res &&
"Call operand has unhandled type");
3729 for (
unsigned i = 0, e =
ArgLocs.size();
i !=
e; ++
i) {
3732 if (Ins[
i].Flags.isByVal()) {
3736 int Size =
Ins[
i].Flags.getByValSize();
3737 unsigned NumRegs = (
Size + 7) / 8;
3750 if (
VA.isRegLoc()) {
3752 EVT RegVT =
VA.getLocVT();
3756 RC = &AArch64::GPR32RegClass;
3758 RC = &AArch64::GPR64RegClass;
3760 RC = &AArch64::FPR16RegClass;
3762 RC = &AArch64::FPR32RegClass;
3764 RC = &AArch64::FPR64RegClass;
3766 RC = &AArch64::FPR128RegClass;
3769 RC = &AArch64::PPRRegClass;
3771 RC = &AArch64::ZPRRegClass;
3782 switch (
VA.getLocInfo()) {
3788 assert(
VA.getValVT().isScalableVector() &&
3789 "Only scalable vectors can be passed indirectly");
3805 assert(
VA.isMemLoc() &&
"CCValAssign is neither reg nor mem");
3808 ?
VA.getLocVT().getSizeInBits()
3809 :
VA.getValVT().getSizeInBits()) / 8;
3813 !
Ins[
i].Flags.isInConsecutiveRegs())
3825 switch (
VA.getLocInfo()) {
3833 assert(
VA.getValVT().isScalableVector() &&
3834 "Only scalable vectors can be passed indirectly");
3849 ExtType,
DL,
VA.getLocVT(), Chain,
FIN,
3856 assert(
VA.getValVT().isScalableVector() &&
3857 "Only scalable vectors can be passed indirectly");
3866 InVals.push_back(ArgValue);
3878 saveVarArgRegisters(CCInfo, DAG,
DL, Chain);
3882 unsigned StackOffset = CCInfo.getNextStackOffset();
3898 if (!CCInfo.isAllocated(AArch64::X8)) {
3909 for (
unsigned I = 0,
E =
Ins.size();
I !=
E; ++
I) {
3910 if (Ins[
I].Flags.isInReg()) {
3927 if (DoesCalleeRestoreStack(CallConv,
TailCallOpt)) {
3951void AArch64TargetLowering::saveVarArgRegisters(
CCState &CCInfo,
3964 AArch64::X3, AArch64::X4, AArch64::X5,
3965 AArch64::X6, AArch64::X7 };
4002 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
4003 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
4037SDValue AArch64TargetLowering::LowerCallResult(
4053 for (
unsigned i = 0;
i !=
RVLocs.size(); ++
i) {
4060 "unexpected return calling convention register assignment");
4076 switch (
VA.getLocInfo()) {
4095 InVals.push_back(Val);
4119bool AArch64TargetLowering::isEligibleForTailCallOptimization(
4154 if (
i->hasByValAttr())
4163 if (
i->hasInRegAttr())
4181 (!
TT.isOSWindows() ||
TT.isOSBinFormatELF() ||
TT.isOSBinFormatMachO()))
4192 "Unexpected variadic calling convention");
4195 if (isVarArg && !Outs.empty()) {
4246 A.getValVT().isScalableVector()) &&
4247 "Expected value to be scalable");
4283 if (FI->getIndex() < 0) {
4305AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
4314 bool &IsTailCall = CLI.IsTailCall;
4316 bool IsVarArg = CLI.IsVarArg;
4333 return In.VT.isScalableVector();
4342 IsTailCall = isEligibleForTailCallOptimization(
4343 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
4344 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
4346 "site marked musttail");
4365 unsigned NumArgs = Outs.size();
4367 for (
unsigned i = 0;
i != NumArgs; ++
i) {
4368 MVT ArgVT = Outs[
i].VT;
4373 assert(!Res &&
"Call operand has unhandled type");
4383 unsigned NumArgs = Outs.size();
4384 for (
unsigned i = 0;
i != NumArgs; ++
i) {
4385 MVT ValVT = Outs[
i].VT;
4388 CLI.getArgs()[Outs[
i].OrigArgIndex].Ty,
4400 assert(!Res &&
"Call operand has unhandled type");
4426 NumBytes =
alignTo(NumBytes, 16);
4438 assert(FPDiff % 16 == 0 &&
"unaligned stack on tail call");
4454 if (IsVarArg && CLI.CB && CLI.CB->isMustTailCall()) {
4463 for (
unsigned i = 0, e =
ArgLocs.size();
i !=
e; ++
i) {
4469 switch (
VA.getLocInfo()) {
4504 assert(
VA.getValVT().isScalableVector() &&
4505 "Only scalable vectors can be passed indirectly");
4510 VA.getValVT().getStoreSize().getKnownMinSize(), Alignment,
false);
4516 Chain,
DL,
Arg, SpillSlot,
4522 if (
VA.isRegLoc()) {
4523 if (
i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
4526 "unexpected calling convention register assignment");
4528 "unexpected use of 'returned'");
4531 if (RegsUsed.count(
VA.getLocReg())) {
4538 [=](
const std::pair<unsigned, SDValue> &
Elt) {
4539 return Elt.first == VA.getLocReg();
4546 CSInfo.erase(std::remove_if(CSInfo.begin(), CSInfo.end(),
4548 return ArgReg.Reg == VA.getLocReg();
4553 RegsUsed.insert(
VA.getLocReg());
4556 CSInfo.emplace_back(
VA.getLocReg(),
i);
4569 OpSize =
VA.getLocVT().getSizeInBits();
4571 OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
4572 :
VA.getValVT().getSizeInBits();
4575 !Flags.isInConsecutiveRegs()) {
4595 Chain = addTokenForArgument(Chain, DAG, MF.
getFrameInfo(), FI);
4604 if (Outs[
i].Flags.isByVal()) {
4609 Outs[
i].Flags.getNonZeroByValAlign(),
4644 auto GV =
G->getGlobal();
4657 const char *Sym = S->getSymbol();
4661 const char *Sym = S->getSymbol();
4676 std::vector<SDValue> Ops;
4677 Ops.push_back(Chain);
4690 Ops.push_back(DAG.getRegister(
RegToPass.first,
4698 Mask =
TRI->getThisReturnPreservedMask(MF, CallConv);
4701 Mask =
TRI->getCallPreservedMask(MF, CallConv);
4704 Mask =
TRI->getCallPreservedMask(MF, CallConv);
4707 TRI->UpdateCustomCallPreservedMask(MF, &Mask);
4709 if (
TRI->isAnyArgRegReserved(MF))
4710 TRI->emitReservedArgRegCallError(MF);
4712 assert(Mask &&
"Missing call preserved mask for calling convention");
4746 return LowerCallResult(Chain,
InFlag, CallConv, IsVarArg, Ins,
DL, DAG,
4751bool AArch64TargetLowering::CanLowerReturn(
4786 assert(
VA.isRegLoc() &&
"Can only return in registers!");
4789 switch (
VA.getLocInfo()) {
4816 if (RegsUsed.count(
VA.getLocReg())) {
4819 [=](
const std::pair<unsigned, SDValue> &
Elt) {
4820 return Elt.first == VA.getLocReg();
4826 RegsUsed.insert(
VA.getLocReg());
4831 for (
auto &RetVal :
RetVals) {
4832 Chain = DAG.
getCopyToReg(Chain,
DL, RetVal.first, RetVal.second, Flag);
4835 DAG.
getRegister(RetVal.first, RetVal.second.getValueType()));
4859 if (AArch64::GPR64RegClass.
contains(*
I))
4861 else if (AArch64::FPR64RegClass.
contains(*
I))
4883 unsigned Flag)
const {
4885 N->getOffset(), Flag);
4890 unsigned Flag)
const {
4896 unsigned Flag)
const {
4898 N->getOffset(), Flag);
4903 unsigned Flag)
const {
4908template <
class NodeTy>
4910 unsigned Flags)
const {
4921template <
class NodeTy>
4923 unsigned Flags)
const {
4937template <
class NodeTy>
4939 unsigned Flags)
const {
4951template <
class NodeTy>
4953 unsigned Flags)
const {
4957 SDValue Sym = getTargetNode(
N, Ty, DAG, Flags);
4969 "unexpected offset in global node");
5022AArch64TargetLowering::LowerDarwinGlobalTLSAddress(
SDValue Op,
5025 "This function expects a Darwin target");
5196AArch64TargetLowering::LowerELFGlobalTLSAddress(
SDValue Op,
5212 "in local exec TLS model");
5283AArch64TargetLowering::LowerWindowsGlobalTLSAddress(
SDValue Op,
5322 Chain =
TLS.getValue(1);
5348 return LowerDarwinGlobalTLSAddress(Op, DAG);
5350 return LowerELFGlobalTLSAddress(Op, DAG);
5352 return LowerWindowsGlobalTLSAddress(Op, DAG);
5369 bool ProduceNonFlagSettingCondBr =
5414 if (
RHSC &&
RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) {
5494 EVT VT =
Op.getValueType();
5501 if (
SrcVT.bitsLT(VT))
5503 else if (
SrcVT.bitsGT(VT))
5568 Attribute::NoImplicitFloat))
5584 EVT VT =
Op.getValueType();
5612 "Unexpected type for custom ctpop lowering");
5619 unsigned EltSize = 8;
5635 if (
Op.getValueType().isVector())
5636 return LowerVSETCC(Op, DAG);
5643 Chain =
Op.getOperand(0);
5650 EVT VT =
Op.getValueType();
5663 "Unexpected setcc expansion!");
5917 return LowerSELECT_CC(CC, LHS, RHS,
TVal,
FVal,
DL, DAG);
5927 EVT Ty =
Op.getValueType();
5928 if (Ty.isScalableVector()) {
5963 return LowerSELECT_CC(CC, LHS, RHS,
TVal,
FVal,
DL, DAG);
5974 return getAddrLarge(JT, DAG);
5976 return getAddrTiny(JT, DAG);
5978 return getAddr(JT, DAG);
6004 return getGOT(CP, DAG);
6006 return getAddrLarge(CP, DAG);
6008 return getAddrTiny(CP, DAG);
6010 return getAddr(CP, DAG);
6019 return getAddrLarge(BA, DAG);
6021 return getAddrTiny(BA, DAG);
6023 return getAddr(BA, DAG);
6048 : FuncInfo->getVarArgsStackIndex(),
6129 return LowerWin64_VASTART(Op, DAG);
6131 return LowerDarwin_VASTART(Op, DAG);
6133 return LowerAAPCS_VASTART(Op, DAG);
6155 "automatic va_arg instruction only works on Darwin");
6158 EVT VT =
Op.getValueType();
6168 Chain =
VAList.getValue(1);
6223 EVT VT =
Op.getValueType();
6249#define GET_REGISTER_MATCHER
6250#include "AArch64GenAsmMatcher.inc"
6257 if (AArch64::X1 <= Reg && Reg <= AArch64::X28) {
6259 unsigned DwarfRegNum =
MRI->getDwarfRegNum(Reg,
false);
6273 EVT VT =
Op.getValueType();
6289 EVT VT =
Op.getValueType();
6301 unsigned Reg = MF.
addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
6309 assert(
Op.getNumOperands() == 3 &&
"Not a double-shift!");
6310 EVT VT =
Op.getValueType();
6366 assert(
Op.getNumOperands() == 3 &&
"Not a double-shift!");
6367 EVT VT =
Op.getValueType();
6421 bool OptForSize)
const {
6448 unsigned Limit = (OptForSize ? 1 : (Subtarget->
hasFuseLiterals() ? 5 : 2));
6453 <<
" imm value: "; Imm.
dump(););
6465 if (ST->hasNEON() &&
6477 return DAG.
getNode(Opcode,
SDLoc(Operand), VT, Operand);
6579const char *AArch64TargetLowering::LowerXConstraint(
EVT ConstraintVT)
const {
6609 if (Constraint ==
"Upa")
6611 if (Constraint ==
"Upl")
6619AArch64TargetLowering::getConstraintType(
StringRef Constraint)
const {
6620 if (Constraint.
size() == 1) {
6621 switch (Constraint[0]) {
6655AArch64TargetLowering::getSingleConstraintMatchWeight(
6658 Value *CallOperandVal =
info.CallOperandVal;
6661 if (!CallOperandVal)
6686std::pair<unsigned, const TargetRegisterClass *>
6687AArch64TargetLowering::getRegForInlineAsmConstraint(
6689 if (Constraint.
size() == 1) {
6690 switch (Constraint[0]) {
6693 return std::make_pair(0U, &AArch64::GPR64commonRegClass);
6694 return std::make_pair(0U, &AArch64::GPR32commonRegClass);
6699 return std::make_pair(0U, &AArch64::ZPRRegClass);
6701 return std::make_pair(0U, &AArch64::FPR16RegClass);
6703 return std::make_pair(0U, &AArch64::FPR32RegClass);
6705 return std::make_pair(0U, &AArch64::FPR64RegClass);
6707 return std::make_pair(0U, &AArch64::FPR128RegClass);
6715 return std::make_pair(0U, &AArch64::ZPR_4bRegClass);
6717 return std::make_pair(0U, &AArch64::FPR128_loRegClass);
6723 return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
6731 return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
6735 if (
StringRef(
"{cc}").equals_lower(Constraint))
6736 return std::make_pair(
unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
6740 std::pair<unsigned, const TargetRegisterClass *> Res;
6746 if ((
Size == 4 ||
Size == 5) && Constraint[0] ==
'{' &&
6747 tolower(Constraint[1]) ==
'v' && Constraint[
Size - 1] ==
'}') {
6750 if (!
Failed && RegNo >= 0 && RegNo <= 31) {
6755 Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
6756 Res.second = &AArch64::FPR64RegClass;
6758 Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
6759 Res.second = &AArch64::FPR128RegClass;
6765 if (Res.second && !Subtarget->
hasFPARMv8() &&
6766 !AArch64::GPR32allRegClass.hasSubClassEq(Res.second) &&
6767 !AArch64::GPR64allRegClass.hasSubClassEq(Res.second))
6768 return std::make_pair(0U,
nullptr);
6775void AArch64TargetLowering::LowerAsmOperandForConstraint(
6776 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
6781 if (Constraint.length() != 1)
6806 GA->getValueType(0));
6831 uint64_t CVal =
C->getZExtValue();
6844 uint64_t
NVal = -
C->getSExtValue();
6846 CVal =
C->getSExtValue();
6877 if ((CVal & 0xFFFF) == CVal)
6879 if ((CVal & 0xFFFF0000ULL) == CVal)
6891 if ((CVal & 0xFFFFULL) == CVal)
6893 if ((CVal & 0xFFFF0000ULL) == CVal)
6895 if ((CVal & 0xFFFF00000000ULL) == CVal)
6897 if ((CVal & 0xFFFF000000000000ULL) == CVal)
6899 uint64_t
NCVal = ~CVal;
6906 if ((
NCVal & 0xFFFF000000000000ULL) ==
NCVal)
6920 Ops.push_back(Result);
6947 EVT EltType = V.getValueType().getVectorElementType();
6948 return EltType.getSizeInBits() / 8;
6968 LLVM_DEBUG(
dbgs() <<
"AArch64TargetLowering::ReconstructShuffle\n");
6970 EVT VT = Op.getValueType();
6989 : Vec(Vec),
MinElt(std::numeric_limits<unsigned>::max()),
MaxElt(0),
7005 dbgs() <<
"Reshuffle failed: "
7006 "a shuffle can only come from building a vector from "
7007 "various elements of other vectors, provided their "
7008 "indices are constant\n");
7015 if (Source == Sources.end())
7020 Source->MinElt = std::min(Source->MinElt,
EltNo);
7021 Source->MaxElt = std::max(Source->MaxElt,
EltNo);
7024 if (Sources.size() > 2) {
7026 dbgs() <<
"Reshuffle failed: currently only do something sane when at "
7027 "most two source vectors are involved\n");
7034 for (
auto &Source : Sources) {
7035 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
7048 for (
auto &Src : Sources) {
7049 EVT SrcVT = Src.ShuffleVec.getValueType();
7066 DAG.
getUNDEF(Src.ShuffleVec.getValueType()));
7074 dbgs() <<
"Reshuffle failed: span too large for a VEXT to cope\n");
7102 Src.WindowBase = -Src.MinElt;
7109 for (
auto &Src : Sources) {
7110 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
7116 Src.WindowBase *= Src.WindowScale;
7129 if (Entry.isUndef())
7132 auto Src =
find(Sources, Entry.getOperand(0));
7138 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
7155 LLVM_DEBUG(
dbgs() <<
"Reshuffle failed: illegal shuffle mask\n");
7160 for (
unsigned i = 0;
i < Sources.size(); ++
i)
7168 dbgs() <<
"Reshuffle, creating node: "; V.dump(););
7218 [&](
int Elt) {return Elt != ExpectedElt++ && Elt != -1;});
7249 "Only possible block sizes for REV are: 16, 32, 64");
7281 if ((M[
i] >= 0 && (
unsigned)M[
i] !=
Idx) ||
7326 if ((M[
i] >= 0 && (
unsigned)M[
i] !=
Idx) ||
7341 for (
unsigned j = 0;
j != 2; ++
j) {
7343 for (
unsigned i = 0;
i != Half; ++
i) {
7344 int MIdx = M[
i +
j * Half];
7431 EVT VT = Op.getValueType();
7440 bool SplitV0 =
V0.getValueSizeInBits() == 128;
7450 if (
V1.getValueSizeInBits() == 128) {
7462 unsigned OpNum = (
PFEntry >> 26) & 0x0F;
7485 if (
LHSID == (1 * 9 + 2) * 9 + 3)
7487 assert(
LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 &&
"Illegal OP_COPY!");
7566 SDValue V2 = Op.getOperand(1);
7569 EVT EltVT = Op.getValueType().getVectorElementType();
7573 for (
int Val : ShuffleMask) {
7574 for (
unsigned Byte = 0; Byte <
BytesPerElt; ++Byte) {
7582 if (Op.getValueSizeInBits() == 128) {
7591 if (V2.getNode()->isUndef()) {
7640 EVT VT =
Op.getValueType();
7653 if (
SVN->isSplat()) {
7654 int Lane =
SVN->getSplatIndex();
7669 unsigned Opcode =
getDUPLANEOp(
V1.getValueType().getVectorElementType());
7708 Lane +=
V1.getConstantOperandVal(1);
7709 V1 =
V1.getOperand(0);
7747 return DAG.
getNode(Opc, dl,
V1.getValueType(),
V1, V2);
7751 return DAG.
getNode(Opc, dl,
V1.getValueType(),
V1, V2);
7755 return DAG.
getNode(Opc, dl,
V1.getValueType(),
V1, V2);
7805 for (
unsigned i = 0;
i != 4; ++
i) {
7806 if (ShuffleMask[
i] < 0)
7816 unsigned Cost = (
PFEntry >> 30);
7828 EVT VT =
Op.getValueType();
7835 switch (
ElemVT.getSimpleVT().SimpleTy) {
7840 if (ConstVal->isOne())
7841 return getPTrue(DAG, dl, VT, AArch64SVEPredPattern::all);
7879 EVT VT =
Op.getValueType();
7893 if (
CIdx && (
CIdx->getZExtValue() <= 3)) {
7926 EVT VT =
BVN->getValueType(0);
7927 APInt SplatBits, SplatUndef;
7928 unsigned SplatBitSize;
7930 if (
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
HasAnyUndefs)) {
7948 const APInt &Bits) {
7949 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
7950 uint64_t
Value = Bits.zextOrTrunc(64).getZExtValue();
7951 EVT VT = Op.getValueType();
7970 const SDValue *LHS =
nullptr) {
7971 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
7972 uint64_t
Value = Bits.zextOrTrunc(64).getZExtValue();
7973 EVT VT = Op.getValueType();
8018 const SDValue *LHS =
nullptr) {
8019 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
8020 uint64_t
Value = Bits.zextOrTrunc(64).getZExtValue();
8021 EVT VT = Op.getValueType();
8058 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
8059 uint64_t
Value = Bits.zextOrTrunc(64).getZExtValue();
8060 EVT VT = Op.getValueType();
8088 const APInt &Bits) {
8089 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
8090 uint64_t
Value = Bits.zextOrTrunc(64).getZExtValue();
8091 EVT VT = Op.getValueType();
8109 const APInt &Bits) {
8110 if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
8111 uint64_t
Value = Bits.zextOrTrunc(64).getZExtValue();
8112 EVT VT = Op.getValueType();
8142 uint64_t &ConstVal) {
8149 EVT VT =
Bvec->getValueType(0);
8154 ConstVal =
FirstElt->getZExtValue();
8159 unsigned Opcode =
N->getOpcode();
8165 if (IID < Intrinsic::num_intrinsics)
8179 EVT VT =
N->getValueType(0);
8234 uint64_t
C2 =
C2node->getZExtValue();
8266 EVT VT =
Op.getValueType();
8273 LHS =
Op.getOperand(1);
8307 EVT VT = Op.getValueType();
8314 for (
SDValue Lane : Op->ops()) {
8323 }
else if (Lane.getNode()->isUndef()) {
8327 "Unexpected BUILD_VECTOR operand type");
8329 Ops.push_back(Lane);
8335 EVT VT = Op.getValueType();
8377 EVT VT =
Op.getValueType();
8387 if (
BVN->isConstant())
8391 Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue());
8392 if (Val.isNullValue() || Val.isAllOnesValue())
8437 else if (ConstantValue != V)
8441 if (!
Value.getNode())
8443 else if (V !=
Value)
8447 if (!
Value.getNode()) {
8449 dbgs() <<
"LowerBUILD_VECTOR: value undefined, creating undef node\n");
8457 LLVM_DEBUG(
dbgs() <<
"LowerBUILD_VECTOR: only low element used, creating 1 "
8458 "SCALAR_TO_VECTOR node\n");
8470 const SDNode *
N = V.getNode();
8496 if (Val - 1 == 2 *
i) {
8528 Value.getValueType() != VT) {
8530 dbgs() <<
"LowerBUILD_VECTOR: use DUP for non-constant splats\n");
8538 if (
Value.getValueSizeInBits() == 64) {
8540 dbgs() <<
"LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, "
8553 EltTy ==
MVT::f64) &&
"Unsupported floating-point vector type");
8555 dbgs() <<
"LowerBUILD_VECTOR: float constant splats, creating int "
8556 "BITCASTS, and try again\n");
8562 LLVM_DEBUG(
dbgs() <<
"LowerBUILD_VECTOR: trying to lower new vector: ";
8564 Val = LowerBUILD_VECTOR(Val, DAG);
8599 dbgs() <<
"LowerBUILD_VECTOR: all elements are constant, use default "
8618 dbgs() <<
"LowerBUILD_VECTOR: alternatives failed, creating sequence "
8619 "of INSERT_VECTOR_ELT\n");
8636 LLVM_DEBUG(
dbgs() <<
"Creating node for op0, it is not undefined:\n");
8641 <<
"Creating nodes for the other vector elements:\n";);
8653 dbgs() <<
"LowerBUILD_VECTOR: use default expansion, failed to find "
8654 "better alternative\n");
8663 EVT VT =
Op.getOperand(0).getValueType();
8687 Op.getOperand(1),
Op.getOperand(2));
8693AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(
SDValue Op,
8698 EVT VT =
Op.getOperand(0).getValueType();
8732 assert(
Op.getValueType().isFixedLengthVector() &&
8733 "Only cases that extract a fixed length vector are supported!");
8735 EVT InVT =
Op.getOperand(0).getValueType();
8737 unsigned Size =
Op.getValueSizeInBits();
8739 if (
InVT.isScalableVector()) {
8748 if (
Idx == 0 &&
InVT.getSizeInBits() <= 128)
8753 if (
Size == 64 &&
Idx *
InVT.getScalarSizeInBits() == 64)
8761 assert(
Op.getValueType().isScalableVector() &&
8762 "Only expect to lower inserts into scalable vectors!");
8764 EVT InVT =
Op.getOperand(1).getValueType();
8768 if (
InVT.isScalableVector() || !useSVEForFixedLengthVectorVT(
InVT))
8780 if (useSVEForFixedLengthVectorVT(VT))
8786 for (
unsigned i = 0;
i != 4; ++
i) {
8797 unsigned Cost = (
PFEntry >> 30);
8826 Op = Op.getOperand(0);
8828 APInt SplatBits, SplatUndef;
8829 unsigned SplatBitSize;
8831 if (!
BVN || !
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
8844 assert(VT.
isVector() &&
"vector shift count is not a vector type");
8855 assert(VT.
isVector() &&
"vector shift count is not a vector type");
8872 EVT VT =
Op.getValueType();
8877 EVT OpVT =
Op.getOperand(0).getValueType();
8887 if (useSVEForFixedLengthVectorVT(
Op.getOperand(0).getValueType()))
8888 return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
8901 if (ShiftAmount != 1)
8949 EVT VT =
Op.getValueType();
8953 if (!
Op.getOperand(1).getValueType().isVector())
8957 switch (
Op.getOpcode()) {
8971 Op.getOperand(0),
Op.getOperand(1));
8977 return LowerToPredicatedOp(Op, DAG, Opc);
8991 unsigned Opc = (
Op.getOpcode() ==
ISD::SRA) ? Intrinsic::aarch64_neon_sshl
9010 "function only supposed to emit natural comparisons");
9018 if (
SrcVT.getVectorElementType().isFloatingPoint()) {
9102 if (
Op.getValueType().isScalableVector()) {
9103 if (
Op.getOperand(0).getValueType().isFloatingPoint())
9156 if (!
Cmp2.getNode())
9181 switch (
Op.getOpcode()) {
9193 assert(
Op->getFlags().hasNoNaNs() &&
"fmax vector reduction needs NoNaN flag");
9200 assert(
Op->getFlags().hasNoNaNs() &&
"fmin vector reduction needs NoNaN flag");
9219 MVT VT =
Op.getSimpleValueType();
9224 Op.getOperand(0),
Op.getOperand(1), RHS,
9225 AN->getMemOperand());
9236 MVT VT =
Op.getSimpleValueType();
9241 Op.getOperand(0),
Op.getOperand(1), RHS,
9242 AN->getMemOperand());
9245SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(
9274AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(
SDValue Op,
9277 "Only Windows alloca probing supported");
9285 EVT VT =
Node->getValueType(0);
9288 "no-stack-arg-probe")) {
9302 Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain,
Size, DAG);
9321 EVT VT =
Op.getValueType();
9331template <
unsigned NumVecs>
9360 unsigned Intrinsic)
const {
9361 auto &
DL =
I.getModule()->getDataLayout();
9362 switch (Intrinsic) {
9363 case Intrinsic::aarch64_sve_st2:
9365 case Intrinsic::aarch64_sve_st3:
9367 case Intrinsic::aarch64_sve_st4:
9369 case Intrinsic::aarch64_neon_ld2:
9370 case Intrinsic::aarch64_neon_ld3:
9371 case Intrinsic::aarch64_neon_ld4:
9372 case Intrinsic::aarch64_neon_ld1x2:
9373 case Intrinsic::aarch64_neon_ld1x3:
9374 case Intrinsic::aarch64_neon_ld1x4:
9375 case Intrinsic::aarch64_neon_ld2lane:
9376 case Intrinsic::aarch64_neon_ld3lane:
9377 case Intrinsic::aarch64_neon_ld4lane:
9378 case Intrinsic::aarch64_neon_ld2r:
9379 case Intrinsic::aarch64_neon_ld3r:
9380 case Intrinsic::aarch64_neon_ld4r: {
9383 uint64_t
NumElts =
DL.getTypeSizeInBits(
I.getType()) / 64;
9385 Info.ptrVal =
I.getArgOperand(
I.getNumArgOperands() - 1);
9392 case Intrinsic::aarch64_neon_st2:
9393 case Intrinsic::aarch64_neon_st3:
9394 case Intrinsic::aarch64_neon_st4:
9395 case Intrinsic::aarch64_neon_st1x2:
9396 case Intrinsic::aarch64_neon_st1x3:
9397 case Intrinsic::aarch64_neon_st1x4:
9398 case Intrinsic::aarch64_neon_st2lane:
9399 case Intrinsic::aarch64_neon_st3lane:
9400 case Intrinsic::aarch64_neon_st4lane: {
9406 if (!
ArgTy->isVectorTy())
9411 Info.ptrVal =
I.getArgOperand(
I.getNumArgOperands() - 1);
9418 case Intrinsic::aarch64_ldaxr:
9419 case Intrinsic::aarch64_ldxr: {
9423 Info.ptrVal =
I.getArgOperand(0);
9429 case Intrinsic::aarch64_stlxr:
9430 case Intrinsic::aarch64_stxr: {
9434 Info.ptrVal =
I.getArgOperand(1);
9440 case Intrinsic::aarch64_ldaxp:
9441 case Intrinsic::aarch64_ldxp:
9444 Info.ptrVal =
I.getArgOperand(0);
9446 Info.align =
Align(16);
9449 case Intrinsic::aarch64_stlxp:
9450 case Intrinsic::aarch64_stxp:
9453 Info.ptrVal =
I.getArgOperand(2);
9455 Info.align =
Align(16);
9458 case Intrinsic::aarch64_sve_ldnt1: {
9462 Info.ptrVal =
I.getArgOperand(1);
9466 if (Intrinsic == Intrinsic::aarch64_sve_ldnt1)
9470 case Intrinsic::aarch64_sve_stnt1: {
9473 Info.memVT =
MVT::getVT(
I.getOperand(0)->getType());
9474 Info.ptrVal =
I.getArgOperand(2);
9478 if (Intrinsic == Intrinsic::aarch64_sve_stnt1)
9522 if (!
Ty1->isIntegerTy() || !
Ty2->isIntegerTy())
9524 unsigned NumBits1 =
Ty1->getPrimitiveSizeInBits();
9525 unsigned NumBits2 =
Ty2->getPrimitiveSizeInBits();
9529 if (
VT1.isVector() ||
VT2.isVector() || !
VT1.isInteger() || !
VT2.isInteger())
9543 if (!
I->hasOneUse())
9549 !(
User->getOpcode() == Instruction::FSub ||
9550 User->getOpcode() == Instruction::FAdd))
9567 if (!
Ty1->isIntegerTy() || !
Ty2->isIntegerTy())
9569 unsigned NumBits1 =
Ty1->getPrimitiveSizeInBits();
9570 unsigned NumBits2 =
Ty2->getPrimitiveSizeInBits();
9574 if (
VT1.isVector() ||
VT2.isVector() || !
VT1.isInteger() || !
VT2.isInteger())
9591 return (
VT1.isSimple() && !
VT1.isVector() &&
VT1.isInteger() &&
9592 VT2.isSimple() && !
VT2.isVector() &&
VT2.isInteger() &&
9593 VT1.getSizeInBits() <= 32);
9596bool AArch64TargetLowering::isExtFreeImpl(
const Instruction *Ext)
const {
9601 if (Ext->getType()->isVectorTy())
9604 for (
const Use &U : Ext->
uses()) {
9613 case Instruction::Shl:
9619 auto &
DL = Ext->getModule()->getDataLayout();
9620 std::advance(
GTI, U.getOperandNo()-1);
9630 if (ShiftAmt == 0 || ShiftAmt > 4)
9634 case Instruction::Trunc:
9637 if (Instr->
getType() == Ext->getOperand(0)->getType())
9655 auto *HalfTy =
HalfV->getType();
9656 return FullTy->getPrimitiveSizeInBits().getFixedSize() ==
9657 2 * HalfTy->getPrimitiveSizeInBits().getFixedSize();
9663 return FullVT->getNumElements() == 2 *
HalfVT->getNumElements();
9695 return Ext->getType()->getScalarSizeInBits() ==
9696 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
9729 if (!
I->getType()->isVectorTy())
9733 switch (II->getIntrinsicID()) {
9734 case Intrinsic::aarch64_neon_umull:
9737 Ops.push_back(&II->getOperandUse(0));
9738 Ops.push_back(&II->getOperandUse(1));
9741 case Intrinsic::aarch64_neon_pmull64:
9743 II->getArgOperand(1)))
9745 Ops.push_back(&II->getArgOperandUse(0));
9746 Ops.push_back(&II->getArgOperandUse(1));
9754 switch (
I->getOpcode()) {
9755 case Instruction::Sub:
9756 case Instruction::Add: {
9765 Ops.push_back(&
Ext1->getOperandUse(0));
9766 Ops.push_back(&
Ext2->getOperandUse(0));
9769 Ops.push_back(&
I->getOperandUse(0));
9770 Ops.push_back(&
I->getOperandUse(1));
9787 unsigned NumBits =
LoadedType.getSizeInBits();
9788 return NumBits == 32 || NumBits == 64;
9796 return (
DL.getTypeSizeInBits(VecTy) + 127) / 128;
9810 unsigned VecSize =
DL.getTypeSizeInBits(VecTy);
9841 "Invalid interleave factor");
9842 assert(!Shuffles.empty() &&
"Empty shufflevector input");
9843 assert(Shuffles.size() == Indices.size() &&
9844 "Unmatched number of shufflevectors and indices");
9862 Type *EltTy =
FVTy->getElementType();
9876 FVTy->getNumElements() / NumLoads);
9881 BaseAddr = Builder.CreateBitCast(
9889 Intrinsic::aarch64_neon_ld3,
9890 Intrinsic::aarch64_neon_ld4};
9904 BaseAddr = Builder.CreateConstGEP1_32(
FVTy->getElementType(), BaseAddr,
9905 FVTy->getNumElements() * Factor);
9908 LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
"ldN");
9911 for (
unsigned i = 0;
i < Shuffles.size();
i++) {
9913 unsigned Index = Indices[
i];
9919 SubVec = Builder.CreateIntToPtr(
9921 FVTy->getNumElements()));
9934 SVI->replaceAllUsesWith(
WideVec);
9968 unsigned Factor)
const {
9970 "Invalid interleave factor");
9973 assert(VecTy->getNumElements() % Factor == 0 &&
"Invalid interleaved store");
9975 unsigned LaneLen = VecTy->getNumElements() / Factor;
9976 Type *EltTy = VecTy->getElementType();
9996 Type *IntTy =
DL.getIntPtrType(EltTy);
10002 Op0 = Builder.CreatePtrToInt(Op0,
IntVecTy);
10003 Op1 = Builder.CreatePtrToInt(Op1,
IntVecTy);
10009 Value *BaseAddr =
SI->getPointerOperand();
10011 if (NumStores > 1) {
10020 BaseAddr = Builder.CreateBitCast(
10022 SubVecTy->getElementType()->getPointerTo(
SI->getPointerAddressSpace()));
10027 Type *PtrTy =
SubVecTy->getPointerTo(
SI->getPointerAddressSpace());
10030 Intrinsic::aarch64_neon_st3,
10031 Intrinsic::aarch64_neon_st4};
10035 for (
unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
10040 for (
unsigned i = 0;
i < Factor;
i++) {
10042 if (Mask[
IdxI] >= 0) {
10043 Ops.push_back(Builder.CreateShuffleVector(
10049 if (Mask[
IdxJ * Factor +
IdxI] >= 0) {
10059 Ops.push_back(Builder.CreateShuffleVector(
10066 if (StoreCount > 0)
10067 BaseAddr = Builder.CreateConstGEP1_32(
SubVecTy->getElementType(),
10070 Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
10071 Builder.CreateCall(
StNFunc, Ops);
10096SDValue AArch64TargetLowering::LowerSVEStructLoad(
unsigned Intrinsic,
10102 unsigned N, Opcode;
10103 static std::map<unsigned, std::pair<unsigned, unsigned>>
IntrinsicMap = {
10110 "invalid tuple vector type!");
10122 for (
unsigned I = 0;
I <
N; ++
I)
10191 if (
Immed == std::numeric_limits<int64_t>::min()) {
10193 <<
": avoid UB for INT64_MIN\n");
10199 ((
Immed & 0xfff) == 0 &&
Immed >> 24 == 0));
10201 <<
" legal add imm: " << (
IsLegal ?
"yes" :
"no") <<
"\n");
10237 uint64_t NumBytes = 0;
10238 if (Ty->isSized()) {
10239 uint64_t NumBits =
DL.getTypeSizeInBits(Ty);
10240 NumBytes = NumBits / 8;
10253 unsigned shift =
Log2_64(NumBytes);
10254 if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
10256 (Offset >> shift) << shift == Offset)
10263 return AM.
Scale == 1 || (AM.
Scale > 0 && (uint64_t)AM.
Scale == NumBytes);
10273 unsigned AS)
const {
10308 switch (Ty->getScalarType()->getTypeID()) {
10323 AArch64::X16, AArch64::X17, AArch64::LR, 0
10331 N =
N->getOperand(0).getNode();
10332 EVT VT =
N->getValueType(0);
10337 uint64_t
TruncMask =
N->getConstantOperandVal(1);
10339 N->getOperand(0).getOpcode() ==
ISD::SRL &&
10348 assert(Ty->isIntegerTy());
10350 unsigned BitSize = Ty->getPrimitiveSizeInBits();
10358 if ((int64_t)Val < 0)
10361 Val &= (1LL << 32) - 1;
10364 unsigned Shift = (63 - LZ) / 16;
10370 unsigned Index)
const {
10374 return (Index == 0 || Index ==
ResVT.getVectorNumElements());
10383 EVT VT =
N->getValueType(0);
10398 if (!ShiftAmt || ShiftAmt->getZExtValue() !=
ShiftEltTy.getSizeInBits() - 1)
10406 EVT VT =
N->getValueType(0);
10435 if (
DCI.isBeforeLegalizeOps())
10445AArch64TargetLowering::BuildSDIVPow2(
SDNode *
N,
const APInt &Divisor,
10453 EVT VT =
N->getValueType(0);
10455 !(Divisor.
isPowerOf2() || (-Divisor).isPowerOf2()))
10491 case Intrinsic::aarch64_sve_cntb:
10492 case Intrinsic::aarch64_sve_cnth:
10493 case Intrinsic::aarch64_sve_cntw:
10494 case Intrinsic::aarch64_sve_cntd:
10503 if (
DCI.isBeforeLegalizeOps())
10543 if (
N->hasOneUse() && (
N->use_begin()->getOpcode() ==
ISD::ADD ||
10544 N->use_begin()->getOpcode() ==
ISD::SUB))
10551 unsigned ShiftAmt, AddSubOpc;
10566 }
else if (
CVPlus1.isPowerOf2()) {
10567 ShiftAmt =
CVPlus1.logBase2();
10589 EVT VT =
N->getValueType(0);
10597 "NegateResult and TrailingZeroes cannot both be true for now.");
10621 EVT VT =
N->getValueType(0);
10623 N->getOperand(0)->getOperand(0)->getOpcode() !=
ISD::SETCC ||
10624 VT.
getSizeInBits() !=
N->getOperand(0)->getValueType(0).getSizeInBits())
10634 if (!
BV->isConstant())
10646 N->getOperand(0)->getOperand(0),
MaskConst);
10661 EVT VT =
N->getValueType(0);
10666 if (VT.
getSizeInBits() !=
N->getOperand(0).getValueSizeInBits())
10678 LN0->getPointerInfo(),
LN0->getAlignment(),
10679 LN0->getMemOperand()->getFlags());
10701 if (!
N->getValueType(0).isSimple())
10705 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
10713 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
10719 uint32_t IntBits = IntTy.getSizeInBits();
10720 if (IntBits != 16 && IntBits != 32 && IntBits != 64)
10729 int32_t Bits = IntBits == 64 ? 64 : 32;
10730 int32_t
C =
BV->getConstantFPSplatPow2ToLog2Int(&
UndefElements, Bits + 1);
10731 if (
C == -1 ||
C == 0 ||
C > Bits)
10735 unsigned NumLanes = Op.getValueType().getVectorNumElements();
10751 "Illegal vector type after legalization");
10755 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
10756 : Intrinsic::aarch64_neon_vcvtfp2fxu;
10777 unsigned Opc = Op->getOpcode();
10778 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
10779 !Op.getOperand(0).getValueType().isSimple() ||
10787 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
10788 int32_t IntBits = IntTy.getSizeInBits();
10789 if (IntBits != 16 && IntBits != 32 && IntBits != 64)
10808 unsigned NumLanes = Op.getValueType().getVectorNumElements();
10830 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
10831 : Intrinsic::aarch64_neon_vcvtfxu2fp;
10851 ShiftAmount =
N->getConstantOperandVal(1);
10865 EVT VT =
N->getValueType(0);
10903 EVT VT =
N->getValueType(0);
10921 uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
10922 for (
int i = 1;
i >= 0; --
i)
10923 for (
int j = 1;
j >= 0; --
j) {
10934 CN0->getZExtValue() != (BitMask & ~
CN1->getZExtValue())) {
10952 EVT VT =
N->getValueType(0);
10967 if (!
MemVT.getVectorElementType().isSimple())
10971 switch (
MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
10988 return Op0->getAPIntValue().getLimitedValue() ==
MaskForTy;
10995 if (
DCI.isBeforeLegalizeOps())
11012 uint64_t
ExtVal =
C->getZExtValue();
11016 EVT EltTy =
UnpkOp->getValueType(0).getVectorElementType();
11028 UnpkOp->getValueType(0),
11034 return DAG.
getNode(Opc,
DL,
N->getValueType(0), And);
11083 EVT VT =
N->getValueType(0);
11125 EVT VT =
N->getValueType(0);
11138 uint64_t ShiftAmt =
C->getZExtValue();
11139 if (VT ==
MVT::i32 && ShiftAmt == 16 &&
11142 if (VT ==
MVT::i64 && ShiftAmt == 32 &&
11154 EVT VT =
N->getValueType(0);
11175 if (
N00VT ==
N10.getValueType() &&
11180 for (
size_t i = 0;
i < Mask.size(); ++
i)
11192 if (
DCI.isBeforeLegalizeOps())
11206 if (
N->getNumOperands() == 2 &&
N0Opc ==
N1Opc &&
11229 uint64_t
N00Index =
N00.getConstantOperandVal(1);
11230 uint64_t
N01Index =
N01.getConstantOperandVal(1);
11231 uint64_t
N10Index =
N10.getConstantOperandVal(1);
11232 uint64_t
N11Index =
N11.getConstantOperandVal(1);
11264 if (!
RHSTy.isVector())
11268 dbgs() <<
"aarch64-lower: concat_vectors bitcast simplification\n");
11271 RHSTy.getVectorNumElements() * 2);
11283 if (
DCI.isBeforeLegalizeOps())
11310 "unexpected vector size on extract_vector_elt!");
11341 switch (
N.getOpcode()) {
11377 N =
N.getOperand(0);
11381 N.getOperand(0).getValueType().getVectorNumElements() / 2;
11464 isSetCC(Op->getOperand(0), Info));
11474 assert(Op && Op->getOpcode() ==
ISD::ADD &&
"Unexpected operation!");
11475 SDValue LHS = Op->getOperand(0);
11476 SDValue RHS = Op->getOperand(1);
11488 ?
InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
11489 :
InfoAndKind.Info.Generic.Opnd0->getValueType();
11507 EVT VT = Op->getValueType(0);
11526 if (
DCI.isBeforeLegalizeOps())
11529 MVT VT =
N->getSimpleValueType(0);
11575 if (
DCI.isBeforeLegalizeOps())
11582 "unexpected shape for long operation");
11598 N->getOperand(0), LHS, RHS);
11603 unsigned ElemBits = ElemTy.getSizeInBits();
11605 int64_t ShiftAmount;
11608 unsigned SplatBitSize;
11610 if (!
BVN->isConstantSplat(
SplatValue, SplatUndef, SplatBitSize,
11617 ShiftAmount =
CVN->getSExtValue();
11626 case Intrinsic::aarch64_neon_sqshl:
11630 case Intrinsic::aarch64_neon_uqshl:
11634 case Intrinsic::aarch64_neon_srshl:
11638 case Intrinsic::aarch64_neon_urshl:
11642 case Intrinsic::aarch64_neon_sqshlu:
11646 case Intrinsic::aarch64_neon_sshl:
11647 case Intrinsic::aarch64_neon_ushl:
11658 return DAG.
getNode(Opcode, dl,
N->getValueType(0),
N->getOperand(1),
11662 return DAG.
getNode(Opcode, dl,
N->getValueType(0),
N->getOperand(1),
11678 if (!
CMask ||
CMask->getZExtValue() != Mask)
11682 N->getOperand(0),
N->getOperand(1),
AndN.getOperand(0));
11690 N->getOperand(1).getSimpleValueType(),
11701 EVT VT =
N->getValueType(0);
11706 if (
DataVT.getVectorElementType().isScalarInteger() &&
11740 SDValue Scalar =
N->getOperand(3);
11755 EVT VT =
N->getValueType(0);
11780 if (
DCI.isBeforeLegalize())
11787 EVT VT =
N->getValueType(0);
11788 EVT CmpVT =
N->getOperand(2).getValueType();
11799 case Intrinsic::aarch64_sve_cmpeq_wide:
11800 case Intrinsic::aarch64_sve_cmpne_wide:
11801 case Intrinsic::aarch64_sve_cmpge_wide:
11802 case Intrinsic::aarch64_sve_cmpgt_wide:
11803 case Intrinsic::aarch64_sve_cmplt_wide:
11804 case Intrinsic::aarch64_sve_cmple_wide: {
11806 int64_t ImmVal =
CN->getSExtValue();
11807 if (ImmVal >= -16 && ImmVal <= 15)
11815 case Intrinsic::aarch64_sve_cmphs_wide:
11816 case Intrinsic::aarch64_sve_cmphi_wide:
11817 case Intrinsic::aarch64_sve_cmplo_wide:
11818 case Intrinsic::aarch64_sve_cmpls_wide: {
11820 uint64_t ImmVal =
CN->getZExtValue();
11846 assert(Op.getValueType().isScalableVector() &&
11848 "Expected legal scalable vector type!");
11913 case Intrinsic::aarch64_neon_vcvtfxs2fp:
11914 case Intrinsic::aarch64_neon_vcvtfxu2fp:
11916 case Intrinsic::aarch64_neon_saddv:
11918 case Intrinsic::aarch64_neon_uaddv:
11920 case Intrinsic::aarch64_neon_sminv:
11922 case Intrinsic::aarch64_neon_uminv:
11924 case Intrinsic::aarch64_neon_smaxv:
11926 case Intrinsic::aarch64_neon_umaxv:
11928 case Intrinsic::aarch64_neon_fmax:
11930 N->getOperand(1),
N->getOperand(2));
11931 case Intrinsic::aarch64_neon_fmin:
11933 N->getOperand(1),
N->getOperand(2));
11934 case Intrinsic::aarch64_neon_fmaxnm:
11936 N->getOperand(1),
N->getOperand(2));
11937 case Intrinsic::aarch64_neon_fminnm:
11939 N->getOperand(1),
N->getOperand(2));
11940 case Intrinsic::aarch64_neon_smull:
11941 case Intrinsic::aarch64_neon_umull:
11942 case Intrinsic::aarch64_neon_pmull:
11943 case Intrinsic::aarch64_neon_sqdmull:
11945 case Intrinsic::aarch64_neon_sqshl:
11946 case Intrinsic::aarch64_neon_uqshl:
11947 case Intrinsic::aarch64_neon_sqshlu:
11948 case Intrinsic::aarch64_neon_srshl:
11949 case Intrinsic::aarch64_neon_urshl:
11950 case Intrinsic::aarch64_neon_sshl:
11951 case Intrinsic::aarch64_neon_ushl:
11953 case Intrinsic::aarch64_crc32b:
11954 case Intrinsic::aarch64_crc32cb:
11956 case Intrinsic::aarch64_crc32h:
11957 case Intrinsic::aarch64_crc32ch:
11959 case Intrinsic::aarch64_sve_smaxv:
11961 case Intrinsic::aarch64_sve_umaxv:
11963 case Intrinsic::aarch64_sve_sminv:
11965 case Intrinsic::aarch64_sve_uminv:
11967 case Intrinsic::aarch64_sve_orv:
11969 case Intrinsic::aarch64_sve_eorv:
11971 case Intrinsic::aarch64_sve_andv:
11973 case Intrinsic::aarch64_sve_index:
11975 case Intrinsic::aarch64_sve_dup:
11977 case Intrinsic::aarch64_sve_dup_x:
11980 case Intrinsic::aarch64_sve_ext:
11982 case Intrinsic::aarch64_sve_smin:
11984 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
11985 case Intrinsic::aarch64_sve_umin:
11987 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
11988 case Intrinsic::aarch64_sve_smax:
11990 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
11991 case Intrinsic::aarch64_sve_umax:
11993 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
11994 case Intrinsic::aarch64_sve_lsl:
11996 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
11997 case Intrinsic::aarch64_sve_lsr:
11999 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
12000 case Intrinsic::aarch64_sve_asr:
12002 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
12003 case Intrinsic::aarch64_sve_cmphs:
12004 if (!
N->getOperand(2).getValueType().isFloatingPoint())
12006 N->getValueType(0),
N->getOperand(1),
N->getOperand(2),
12009 case Intrinsic::aarch64_sve_cmphi:
12010 if (!
N->getOperand(2).getValueType().isFloatingPoint())
12012 N->getValueType(0),
N->getOperand(1),
N->getOperand(2),
12015 case Intrinsic::aarch64_sve_cmpge:
12016 if (!
N->getOperand(2).getValueType().isFloatingPoint())
12018 N->getValueType(0),
N->getOperand(1),
N->getOperand(2),
12021 case Intrinsic::aarch64_sve_cmpgt:
12022 if (!
N->getOperand(2).getValueType().isFloatingPoint())
12024 N->getValueType(0),
N->getOperand(1),
N->getOperand(2),
12027 case Intrinsic::aarch64_sve_cmpeq:
12028 if (!
N->getOperand(2).getValueType().isFloatingPoint())
12030 N->getValueType(0),
N->getOperand(1),
N->getOperand(2),
12033 case Intrinsic::aarch64_sve_cmpne:
12034 if (!
N->getOperand(2).getValueType().isFloatingPoint())
12036 N->getValueType(0),
N->getOperand(1),
N->getOperand(2),
12039 case Intrinsic::aarch64_sve_fadda:
12041 case Intrinsic::aarch64_sve_faddv:
12043 case Intrinsic::aarch64_sve_fmaxnmv:
12045 case Intrinsic::aarch64_sve_fmaxv:
12047 case Intrinsic::aarch64_sve_fminnmv:
12049 case Intrinsic::aarch64_sve_fminv:
12051 case Intrinsic::aarch64_sve_sel:
12053 N->getOperand(1),
N->getOperand(2),
N->getOperand(3));
12054 case Intrinsic::aarch64_sve_cmpeq_wide:
12056 case Intrinsic::aarch64_sve_cmpne_wide:
12058 case Intrinsic::aarch64_sve_cmpge_wide:
12060 case Intrinsic::aarch64_sve_cmpgt_wide:
12062 case Intrinsic::aarch64_sve_cmplt_wide:
12064 case Intrinsic::aarch64_sve_cmple_wide:
12066 case Intrinsic::aarch64_sve_cmphs_wide:
12068 case Intrinsic::aarch64_sve_cmphi_wide:
12070 case Intrinsic::aarch64_sve_cmplo_wide:
12072 case Intrinsic::aarch64_sve_cmpls_wide:
12074 case Intrinsic::aarch64_sve_ptest_any:
12077 case Intrinsic::aarch64_sve_ptest_first:
12080 case Intrinsic::aarch64_sve_ptest_last:
12098 if (IID == Intrinsic::aarch64_neon_sabd ||
12099 IID == Intrinsic::aarch64_neon_uabd) {
12133 if (!
DCI.isBeforeLegalizeOps())
12153 if (
SrcVT.getSizeInBits().getKnownMinSize() != 64)
12169 LoVT.getVectorElementCount());
12184 assert(!
St.isTruncatingStore() &&
"cannot split truncating vector store");
12194 uint64_t BaseOffset = 0;
12202 if (BasePtr->getOpcode() ==
ISD::ADD &&
12205 BasePtr = BasePtr->getOperand(0);
12216 St.getMemOperand()->getFlags());
12225 assert(
ContentTy.isSimple() &&
"No SVE containers for extended types");
12227 switch (
ContentTy.getSimpleVT().SimpleTy) {
12254 EVT VT =
N->getValueType(0);
12280 EVT VT =
N->getValueType(0);
12281 EVT PtrTy =
N->getOperand(3).getValueType();
12307template <
unsigned Opcode>
12311 "Unsupported opcode.");
12313 EVT VT =
N->getValueType(0);
12343 if (
DataVT.isFloatingPoint())
12347 if (
Data.getValueType().isFloatingPoint())
12367 EVT PtrTy =
N->getOperand(4).getValueType();
12373 if (
DataVT.isFloatingPoint())
12421 if (!
StVal.hasOneUse())
12426 if (
St.isTruncatingStore())
12432 int64_t Offset =
St.getBasePtr()->getConstantOperandVal(1);
12481 if (
St.isTruncatingStore())
12594 if (
DCI.isBeforeLegalizeOps())
12598 EVT VT =
N->getValueType(0);
12628 if (UI.getUse().getResNo() == 1)
12634 SDValue Addr = LD->getOperand(1);
12641 || UI.getUse().getResNo() != Addr.
getResNo())
12658 Visited.insert(Addr.
getNode());
12659 Worklist.push_back(
User);
12660 Worklist.push_back(LD);
12661 Worklist.push_back(
Vector.getNode());
12667 Ops.push_back(LD->getOperand(0));
12670 Ops.push_back(Lane);
12672 Ops.push_back(Addr);
12673 Ops.push_back(Inc);
12704 !
DCI.isBeforeLegalizeOps());
12707 DCI.CommitTargetLoweringOpt(
TLO);
12733 if (
DCI.isBeforeLegalize() ||
DCI.isCalledByLegalizer())
12736 unsigned AddrOpIdx =
N->getNumOperands() - 1;
12744 UI.getUse().getResNo() != Addr.
getResNo())
12751 Visited.insert(Addr.
getNode());
12752 Worklist.push_back(
N);
12753 Worklist.push_back(
User);
12759 bool IsStore =
false;
12774 NumVecs = 2; IsStore =
true;
break;
12776 NumVecs = 3; IsStore =
true;
break;
12778 NumVecs = 4; IsStore =
true;
break;
12786 NumVecs = 2; IsStore =
true;
break;
12788 NumVecs = 3; IsStore =
true;
break;
12790 NumVecs = 4; IsStore =
true;
break;
12813 VecTy =
N->getOperand(2).getValueType();
12815 VecTy =
N->getValueType(0);
12829 Ops.push_back(
N->getOperand(0));
12833 Ops.push_back(
N->getOperand(
i));
12834 Ops.push_back(Addr);
12835 Ops.push_back(Inc);
12850 MemInt->getMemOperand());
12871 switch(V.getNode()->getOpcode()) {
12878 ExtType =
LoadNode->getExtensionType();
12904 1LL << (width - 1);
13072 else if (
CNV == 65535)
13144 unsigned CmpOpc = Cmp.getOpcode();
13150 if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
13153 SDValue LHS = Cmp.getOperand(0);
13154 SDValue RHS = Cmp.getOperand(1);
13157 "Expected the value type to be the same for both operands!");
13179 DCI.CombineTo(
N, BR,
false);
13191 if (!Op->hasOneUse())
13201 Bit < Op->getValueType(0).getSizeInBits()) {
13207 Bit < Op->getOperand(0).getValueSizeInBits()) {
13211 if (Op->getNumOperands() != 2)
13218 switch (Op->getOpcode()) {
13224 if ((
C->getZExtValue() >> Bit) & 1)
13230 if (
C->getZExtValue() <= Bit &&
13231 (Bit -
C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
13232 Bit = Bit -
C->getZExtValue();
13239 Bit = Bit +
C->getZExtValue();
13240 if (Bit >= Op->getValueType(0).getSizeInBits())
13241 Bit = Op->getValueType(0).getSizeInBits() - 1;
13246 if ((Bit +
C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
13247 Bit = Bit +
C->getZExtValue();
13254 if ((
C->getZExtValue() >> Bit) & 1)
13272 unsigned NewOpc =
N->getOpcode();
13304 if (
ResVT.getSizeInBits() !=
CmpVT.getSizeInBits())
13334 "Scalar-SETCC feeding SELECT has unexpected result type!");
13356 if (
CCVT.getSizeInBits() !=
ResVT.getSizeInBits())
13376 ResVT.changeVectorElementTypeToInteger(), Mask);
13383 if (
N->getValueType(0) ==
N->getOperand(0).getValueType())
13384 return N->getOperand(0);
13400 uint64_t MinOffset = -1ull;
13409 MinOffset = std::min(MinOffset,
C->getZExtValue());
13411 uint64_t Offset = MinOffset +
GN->getOffset();
13416 if (Offset <= uint64_t(
GN->getOffset()))
13427 if (Offset >= (1 << 21))
13432 if (!
T->isSized() ||
13446 assert(Offset.getValueType().isScalableVector() &&
13447 "This method is only for scalable vectors of offsets");
13496 "Scatter stores are only possible for SVE vectors");
13506 if (
SrcElVT.isFloatingPoint())
13515 SDValue Offset =
N->getOperand(5);
13542 SrcVT.getScalarSizeInBits() / 8)) {
13560 Offset.getValueType().getSimpleVT().SimpleTy ==
MVT::nxv2i32)
13563 if (!TLI.isTypeLegal(Offset.getValueType()))
13573 if (
SrcVT.isFloatingPoint())
13591 return DAG.
getNode(Opcode,
DL, VTs, Ops);
13599 "Gather loads are only possible for SVE vectors");
13612 SDValue Offset =
N->getOperand(4);
13619 RetVT.getScalarSizeInBits());
13629 Offset.getValueType().isVector())
13641 RetVT.getScalarSizeInBits() / 8)) {
13663 Offset.getValueType().getSimpleVT().SimpleTy ==
MVT::nxv2i32)
13673 if (
RetVT.isFloatingPoint())
13679 Base, Offset,
OutVT};
13689 if (
RetVT.isFloatingPoint())
13698 if (
DCI.isBeforeLegalizeOps())
13725 "Sign extending from an invalid type");
13832 if (Offset.getValueType().getSimpleVT().SimpleTy !=
MVT::nxv2i32)
13863 Ops[1] = DAG.
getConstant(Intrinsic::aarch64_sve_prfb_gather_uxtw_index,
DL,
13872 switch (
N->getOpcode()) {
13933 case Intrinsic::aarch64_sve_prfb_gather_scalar_offset:
13935 case Intrinsic::aarch64_sve_prfh_gather_scalar_offset:
13937 case Intrinsic::aarch64_sve_prfw_gather_scalar_offset:
13939 case Intrinsic::aarch64_sve_prfd_gather_scalar_offset:
13941 case Intrinsic::aarch64_sve_prfb_gather_uxtw_index:
13942 case Intrinsic::aarch64_sve_prfb_gather_sxtw_index:
13943 case Intrinsic::aarch64_sve_prfh_gather_uxtw_index:
13944 case Intrinsic::aarch64_sve_prfh_gather_sxtw_index:
13945 case Intrinsic::aarch64_sve_prfw_gather_uxtw_index:
13946 case Intrinsic::aarch64_sve_prfw_gather_sxtw_index:
13947 case Intrinsic::aarch64_sve_prfd_gather_uxtw_index:
13948 case Intrinsic::aarch64_sve_prfd_gather_sxtw_index:
13950 case Intrinsic::aarch64_neon_ld2:
13951 case Intrinsic::aarch64_neon_ld3:
13952 case Intrinsic::aarch64_neon_ld4:
13953 case Intrinsic::aarch64_neon_ld1x2:
13954 case Intrinsic::aarch64_neon_ld1x3:
13955 case Intrinsic::aarch64_neon_ld1x4:
13956 case Intrinsic::aarch64_neon_ld2lane:
13957 case Intrinsic::aarch64_neon_ld3lane:
13958 case Intrinsic::aarch64_neon_ld4lane:
13959 case Intrinsic::aarch64_neon_ld2r:
13960 case Intrinsic::aarch64_neon_ld3r:
13961 case Intrinsic::aarch64_neon_ld4r:
13962 case Intrinsic::aarch64_neon_st2:
13963 case Intrinsic::aarch64_neon_st3:
13964 case Intrinsic::aarch64_neon_st4:
13965 case Intrinsic::aarch64_neon_st1x2:
13966 case Intrinsic::aarch64_neon_st1x3:
13967 case Intrinsic::aarch64_neon_st1x4:
13968 case Intrinsic::aarch64_neon_st2lane:
13969 case Intrinsic::aarch64_neon_st3lane:
13970 case Intrinsic::aarch64_neon_st4lane:
13972 case Intrinsic::aarch64_sve_ldnt1:
13974 case Intrinsic::aarch64_sve_ld1rq:
13976 case Intrinsic::aarch64_sve_ld1ro:
13978 case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
13980 case Intrinsic::aarch64_sve_ldnt1_gather:
13982 case Intrinsic::aarch64_sve_ldnt1_gather_index:
13985 case Intrinsic::aarch64_sve_ldnt1_gather_uxtw:
13987 case Intrinsic::aarch64_sve_ld1:
13989 case Intrinsic::aarch64_sve_ldnf1:
13991 case Intrinsic::aarch64_sve_ldff1:
13993 case Intrinsic::aarch64_sve_st1:
13995 case Intrinsic::aarch64_sve_stnt1:
13997 case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
13999 case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
14001 case Intrinsic::aarch64_sve_stnt1_scatter:
14003 case Intrinsic::aarch64_sve_stnt1_scatter_index:
14005 case Intrinsic::aarch64_sve_ld1_gather:
14007 case Intrinsic::aarch64_sve_ld1_gather_index:
14010 case Intrinsic::aarch64_sve_ld1_gather_sxtw:
14013 case Intrinsic::aarch64_sve_ld1_gather_uxtw:
14016 case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
14020 case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
14024 case Intrinsic::aarch64_sve_ld1_gather_scalar_offset:
14026 case Intrinsic::aarch64_sve_ldff1_gather:
14028 case Intrinsic::aarch64_sve_ldff1_gather_index:
14031 case Intrinsic::aarch64_sve_ldff1_gather_sxtw:
14035 case Intrinsic::aarch64_sve_ldff1_gather_uxtw:
14039 case Intrinsic::aarch64_sve_ldff1_gather_sxtw_index:
14043 case Intrinsic::aarch64_sve_ldff1_gather_uxtw_index:
14047 case Intrinsic::aarch64_sve_ldff1_gather_scalar_offset:
14050 case Intrinsic::aarch64_sve_st1_scatter:
14052 case Intrinsic::aarch64_sve_st1_scatter_index:
14054 case Intrinsic::aarch64_sve_st1_scatter_sxtw:
14057 case Intrinsic::aarch64_sve_st1_scatter_uxtw:
14060 case Intrinsic::aarch64_sve_st1_scatter_sxtw_index:
14064 case Intrinsic::aarch64_sve_st1_scatter_uxtw_index:
14068 case Intrinsic::aarch64_sve_st1_scatter_scalar_offset:
14070 case Intrinsic::aarch64_sve_tuple_get: {
14084 case Intrinsic::aarch64_sve_tuple_set: {
14105 Opnds.push_back(Vec);
14116 case Intrinsic::aarch64_sve_tuple_create2:
14117 case Intrinsic::aarch64_sve_tuple_create3:
14118 case Intrinsic::aarch64_sve_tuple_create4: {
14123 for (
unsigned I = 2;
I <
N->getNumOperands(); ++
I)
14124 Opnds.push_back(
N->getOperand(
I));
14130 (
N->getNumOperands() - 2));
14134 case Intrinsic::aarch64_sve_ld2:
14135 case Intrinsic::aarch64_sve_ld3:
14136 case Intrinsic::aarch64_sve_ld4: {
14140 SDValue BasePtr =
N->getOperand(3);
14142 unsigned IntrinsicID =
14145 LowerSVEStructLoad(IntrinsicID,
LoadOps,
N->getValueType(0), DAG,
DL);
14162bool AArch64TargetLowering::isUsedByReturnOnly(
SDNode *
N,
14164 if (
N->getNumValues() != 1)
14166 if (!
N->hasNUsesOfValue(1, 0))
14170 SDNode *Copy = *
N->use_begin();
14174 if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
14177 TCChain = Copy->getOperand(0);
14199bool AArch64TargetLowering::mayBeEmittedAsTailCall(
const CallInst *CI)
const {
14203bool AArch64TargetLowering::getIndexedAddressParts(
SDNode *Op,
SDValue &Base,
14211 Base =
Op->getOperand(0);
14215 int64_t
RHSC = RHS->getSExtValue();
14227bool AArch64TargetLowering::getPreIndexedAddressParts(
SDNode *
N,
SDValue &Base,
14234 VT =
LD->getMemoryVT();
14235 Ptr =
LD->getBasePtr();
14237 VT =
ST->getMemoryVT();
14238 Ptr =
ST->getBasePtr();
14243 if (!getIndexedAddressParts(Ptr.
getNode(), Base, Offset, AM,
IsInc, DAG))
14249bool AArch64TargetLowering::getPostIndexedAddressParts(
14255 VT =
LD->getMemoryVT();
14256 Ptr =
LD->getBasePtr();
14258 VT =
ST->getMemoryVT();
14259 Ptr =
ST->getBasePtr();
14264 if (!getIndexedAddressParts(Op, Base, Offset, AM,
IsInc, DAG))
14312 return std::make_pair(Lo, Hi);
14315void AArch64TargetLowering::ReplaceExtractSubVectorResults(
14321 if (!
InVT.isScalableVector() || !
InVT.isInteger())
14325 EVT VT =
N->getValueType(0);
14331 if (
InVT.getVectorElementCount().Min != (
ResEC.Min * 2))
14338 unsigned Index =
CIndex->getZExtValue();
14339 if ((Index != 0) && (Index !=
ResEC.Min))
14351 SDLoc dl(V.getNode());
14372 "AtomicCmpSwap on types less than 128 should be legal");
14374 if (Subtarget->
hasLSE()) {
14387 switch (
MemOp->getOrdering()) {
14389 Opcode = AArch64::CASPX;
14392 Opcode = AArch64::CASPAX;
14395 Opcode = AArch64::CASPLX;
14399 Opcode = AArch64::CASPALX;
14425 New.first, New.second,
N->getOperand(0)};
14427 AArch64::CMP_SWAP_128,
SDLoc(
N),
14438void AArch64TargetLowering::ReplaceNodeResults(
14440 switch (
N->getOpcode()) {
14477 assert(
N->getValueType(0) ==
MVT::i128 &&
"unexpected illegal conversion");
14485 "unexpected load's value type");
14495 DAG.
getVTList({MVT::i64, MVT::i64, MVT::Other}),
14496 {LoadNode->getChain(), LoadNode->getBasePtr()},
LoadNode->getMemoryVT(),
14505 ReplaceExtractSubVectorResults(
N,
Results, DAG);
14508 EVT VT =
N->getValueType(0);
14510 "custom lowering for unexpected type");
14517 case Intrinsic::aarch64_sve_clasta_n: {
14521 N->getOperand(1), Op2,
N->getOperand(3));
14525 case Intrinsic::aarch64_sve_clastb_n: {
14529 N->getOperand(1), Op2,
N->getOperand(3));
14533 case Intrinsic::aarch64_sve_lasta: {
14536 N->getOperand(1),
N->getOperand(2));
14540 case Intrinsic::aarch64_sve_lastb: {
14543 N->getOperand(1),
N->getOperand(2));
14558unsigned AArch64TargetLowering::combineRepeatedFPDivisors()
const {
14579 unsigned Size =
SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
14580 return Size == 128;
14610 if (Subtarget->
hasLSE())
14624 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
14631 if (ValTy->getPrimitiveSizeInBits() == 128) {
14633 IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
14639 Value *Lo = Builder.CreateExtractValue(
LoHi, 0,
"lo");
14640 Value *Hi = Builder.CreateExtractValue(
LoHi, 1,
"hi");
14641 Lo = Builder.CreateZExt(Lo, ValTy,
"lo64");
14642 Hi = Builder.CreateZExt(Hi, ValTy,
"hi64");
14643 return Builder.CreateOr(
14649 IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
14658 return Builder.CreateBitCast(Trunc, EltTy);
14663 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
14670 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
14678 IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
14682 Value *Lo = Builder.CreateTrunc(Val, Int64Ty,
"lo");
14683 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty,
"hi");
14685 return Builder.CreateCall(
Stxr, {Lo, Hi, Addr});
14689 IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
14695 Val = Builder.CreateBitCast(Val,
IntValTy);
14697 return Builder.CreateCall(
Stxr,
14698 {Builder.CreateZExtOrBitCast(
14699 Val,
Stxr->getFunctionType()->getParamType(0)),
14705 return Ty->isArrayTy();
14708bool AArch64TargetLowering::shouldNormalizeToSelectSequence(
LLVMContext &,
14714 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
14717 return IRB.CreatePointerCast(
14720 IRB.getInt8PtrTy()->getPointerTo(0));
14742 M.getOrInsertGlobal(
"__security_cookie",
14751 F->addAttribute(1, Attribute::AttrKind::InReg);
14761 return M.getGlobalVariable(
"__security_cookie");
14797 return Mask->getValue().isPowerOf2();
14840 if (AArch64::GPR64RegClass.
contains(*
I))
14841 RC = &AArch64::GPR64RegClass;
14842 else if (AArch64::FPR64RegClass.
contains(*
I))
14843 RC = &AArch64::FPR64RegClass;
14853 assert(Entry->getParent()->getFunction().hasFnAttribute(
14854 Attribute::NoUnwind) &&
14855 "Function should be nounwind in insertCopiesSplitCSR!");
14856 Entry->addLiveIn(*
I);
14861 for (
auto *Exit : Exits)
14863 TII->get(TargetOpcode::COPY), *
I)
14897void AArch64TargetLowering::finalizeLowering(
MachineFunction &MF)
const {
14907bool AArch64TargetLowering::shouldLocalize(
14909 switch (
MI.getOpcode()) {
14910 case TargetOpcode::G_GLOBAL_VALUE: {
14921 case AArch64::ADRP:
14922 case AArch64::G_ADD_LOW:
14950 "Expected legal fixed length vector!");
14976 "Expected legal fixed length vector!");
14983 PgPattern = AArch64SVEPredPattern::vl1;
14986 PgPattern = AArch64SVEPredPattern::vl2;
14989 PgPattern = AArch64SVEPredPattern::vl4;
14992 PgPattern = AArch64SVEPredPattern::vl8;
14995 PgPattern = AArch64SVEPredPattern::vl16;
14998 PgPattern = AArch64SVEPredPattern::vl32;
15001 PgPattern = AArch64SVEPredPattern::vl64;
15004 PgPattern = AArch64SVEPredPattern::vl128;
15007 PgPattern = AArch64SVEPredPattern::vl256;
15043 "Expected legal scalable vector!");
15058 "Expected to convert into a scalable vector!");
15059 assert(V.getValueType().isFixedLengthVector() &&
15060 "Expected a fixed length vector operand!");
15069 "Expected to convert into a fixed length vector!");
15070 assert(V.getValueType().isScalableVector() &&
15071 "Expected a scalable vector operand!");
15078SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
15083 EVT VT =
Op.getValueType();
15089 Load->getMemoryVT(),
Load->getMemOperand(),
Load->getAddressingMode(),
15090 Load->getExtensionType());
15098SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
15103 EVT VT =
Store->getValue().getValueType();
15110 Store->getMemOperand(),
Store->getAddressingMode(),
15111 Store->isTruncatingStore());
15114SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
15116 EVT VT =
Op.getValueType();
15152 unsigned NewOp)
const {
15153 EVT VT =
Op.getValueType();
15157 if (useSVEForFixedLengthVectorVT(VT)) {
15162 for (
const SDValue &V :
Op->op_values()) {
15168 assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
15169 "Only fixed length vectors are supported!");
15180 for (
const SDValue &V :
Op->op_values()) {
15182 "Only scalable vectors are supported!");
unsigned const MachineRegisterInfo * MRI
if(Register::isVirtualRegister(Reg)) return MRI -> getRegClass(Reg) ->hasSuperClassEq(&AArch64::GPR64RegClass)
static unsigned MatchRegisterName(StringRef Name)
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG)
NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 regi...
static bool isConcatMask(ArrayRef< int > Mask, EVT VT, bool SplitLHS)
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG)
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, AArch64CC::CondCode CC, bool NoNans, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue CCOp, AArch64CC::CondCode Predicate, AArch64CC::CondCode OutCC, const SDLoc &DL, SelectionDAG &DAG)
can be transformed to: not (and (not (and (setCC (cmp C)) (setCD (cmp D)))) (and (not (setCA (cmp A))...
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector...
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
static bool isSingletonEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
static SDValue performCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex)
static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue NormalizeBuildVector(SDValue Op, SelectionDAG &DAG)
static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St)
Replace a splat of zeros to a vector store by scalar stores of WZR/XZR.
static SDValue GenerateTBL(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static std::pair< SDValue, SDValue > splitInt128(SDValue N, SelectionDAG &DAG)
static bool setInfoSVEStN(AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI)
Set the IntrinsicInfo for the aarch64_sve_st<N> intrinsics.
static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo)
Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one.
static bool isLegalArithImmed(uint64_t C)
static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT)
static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG)
static bool areExtractShuffleVectors(Value *Op1, Value *Op2)
Check if both Op1 and Op2 are shufflevector extracts of either the lower or upper half of the vector ...
static bool isREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize.
static SDValue performSRLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
Fold a floating-point divide by power of two into fixed-point to floating-point conversion.
static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset, SDLoc DL, unsigned BitWidth)
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG)
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
static SDValue performSVEAndCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performBRCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static MVT getSVEContainerType(EVT ContentTy)
static SDValue performNEONPostLDSTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates.
static void ReplaceCMP_SWAP_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp, SelectionDAG &DAG)
static bool areExtractExts(Value *Ext1, Value *Ext2)
Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.
static SDValue performSelectCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with the compare-mask instruct...
static bool canGuaranteeTCO(CallingConv::ID CC)
Return true if the calling convention is one that we can guarantee TCO for.
static cl::opt< bool > EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden, cl::desc("Enable AArch64 logical imm instruction " "optimization"), cl::init(true))
static bool isUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes, unsigned ScalarSizeInBytes)
Check if the value of OffsetInBytes can be used as an immediate for the gather load/prefetch and scat...
static bool isUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v,...
static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
static unsigned getDUPLANEOp(EVT EltType)
static void changeFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
static bool isTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget, const TargetMachine &TM)
static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST, EVT VT, EVT MemVT, SelectionDAG &DAG)
static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG)
static SDValue performNVCASTCombine(SDNode *N)
Get rid of unnecessary NVCASTs (that don't change the type).
static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG)
static void ReplaceReductionResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, unsigned InterOp, unsigned AcrossOp)
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, bool &FromHi)
An EXTR instruction is made up of two shifts, ORed together.
static bool isEquivalentMaskless(unsigned CC, unsigned width, ISD::LoadExtType ExtType, int AddConstant, int CompConstant)
static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG)
static EVT getExtensionTo64Bits(const EVT &OrigVT)
static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG)
static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate)
Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops.
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N, SelectionDAG &DAG)
static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static bool isINSMask(ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, APInt &UndefBits)
static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG)
static unsigned getIntrinsicID(const SDNode *N)
static bool IsSVECntIntrinsic(SDValue S)
static SDValue performANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool canEmitConjunction(const SDValue Val, bool &CanNegate, bool &MustBeFirst, bool WillNegate, unsigned Depth=0)
Returns true if Val is a tree of AND/OR/SETCC operations that can be expressed as a conjunction.
static SDValue LowerSVEIntReduction(SDNode *N, unsigned Opc, SelectionDAG &DAG)
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL, EVT VT)
static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St, SDValue SplatVal, unsigned NumVecElts)
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG)
static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG)
static SDValue performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc, SelectionDAG &DAG)
static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG)
Legalize the gather prefetch (scalar + vector addressing mode) when the offset vector is an unpacked ...
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc)
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG, unsigned ScalarSizeInBytes)
Combines a node carrying the intrinsic aarch64_sve_prf<T>_gather_scalar_offset into a node that uses ...
static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St)
Replace a splat of a scalar to a vector store by scalar stores of the scalar value.
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC)
changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 CC
static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets=true)
static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc, SelectionDAG &DAG)
static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm, const APInt &Demanded, TargetLowering::TargetLoweringOpt &TLO, unsigned NewOpc)
static unsigned getCmpOperandFoldingProfit(SDValue Op)
Returns how profitable it is to fold a comparison's operand's shift and/or extension operations.
static SDValue performConcatVectorsCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N, SelectionDAG &DAG)
static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits)
static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL, EVT VT)
static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG)
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG)
static SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT, int Pattern)
static bool isEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseEXT, unsigned &Imm)
static SDValue tryCombineFixedPointConvert(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT)
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG)
static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
Fold a floating-point multiply by power of two into floating-point to fixed-point conversion.
static void changeFPCCToANDAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
Convert a DAG fp condition code to an AArch64 CC.
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
Turn vector tests of the signbit in the form of: xor (sra X, elt_size(X)-1), -1 into: cmge X,...
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG)
static bool isAllConstantBuildVector(const SDValue &PotentialBVec, uint64_t &ConstVal)
static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG)
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG)
WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 regis...
static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG)
static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op, AArch64CC::CondCode Cond)
static bool isSetCCOrZExtSetCC(const SDValue &Op, SetCCInfoAndKind &Info)
cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performPostLD1Combine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, bool IsLaneOp)
Target-specific DAG combine function for post-increment LD1 (lane) and post-increment LD1R.
static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2)
Check if Op1 and Op2 could be used with vmull_high_p64 intrinsic.
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG)
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static PredicateConstraint parsePredicateConstraint(StringRef Constraint)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG)
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
static bool isPackedVectorType(EVT VT, SelectionDAG &DAG)
Returns true if VT's elements occupy the lowest bit positions of its associated register class withou...
static SDValue performAddSubLongCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static bool isTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v,...
static bool isZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue performSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
static SDValue performExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, SelectionDAG &DAG)
static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl, SelectionDAG &DAG, SDValue Chain, bool IsSignaling)
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V)
static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG, unsigned Opcode, bool OnlyPackedOffsets=true)
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static bool isCMN(SDValue Op, ISD::CondCode CC)
static SDValue tryCombineToEXTR(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
EXTR instruction extracts a contiguous chunk of bits from two existing registers viewed as a high/low...
static bool isOperandOfVmullHighP64(Value *Op)
Check if Op could be used with vmull_high_p64 intrinsic.
static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode, SDValue Operand, SelectionDAG &DAG, int &ExtraSteps)
static bool isEssentiallyExtractHighSubvector(SDValue N)
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
static Value * UseTlsOffset(IRBuilder<> &IRB, unsigned Offset)
static unsigned getExtFactor(SDValue &V)
getExtFactor - Determine the adjustment factor for the position when generating an "extract from vect...
static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT)
static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, const APInt &Bits, const SDValue *LHS=nullptr)
static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG)
static const MVT MVT_CC
Value type used for condition codes.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performTBZCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC)
Emit expression as a conjunction (a series of CCMP/CFCMP ops).
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, const SDLoc &dl)
static bool performTBISimplification(SDValue Addr, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Simplify Addr given that the top byte of it is ignored by HW during address translation.
static SDValue performIntrinsicCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static bool isZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v,...
static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V)
static std::pair< SDValue, SDValue > getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG)
#define FALKOR_STRIDED_ACCESS_MD
static const unsigned PerfectShuffleTable[6561+1]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isConstant(const MachineInstr &MI)
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static const MCPhysReg GPRArgRegs[]
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
SmallVector< MachineOperand, 4 > Cond
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
PropagateLiveness Given that RA is a live propagate it s liveness to any other values it uses(according to Uses). void DeadArgumentEliminationPass
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs.
mir Rename Register Operands
static Value * getNumElements(BasicBlock *Preheader, Value *BTC)
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
#define STATISTIC(VARNAME, DESC)
static const int BlockSize
This defines the Use class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static constexpr int Concat[]
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getVarArgsFPRSize() const
void setVarArgsStackIndex(int Index)
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setIsSplitCSR(bool s)
int getVarArgsFPRIndex() const
void incNumLocalDynamicTLSAccesses()
void setBytesInStackArgArea(unsigned bytes)
int getVarArgsStackIndex() const
void setVarArgsGPRIndex(int Index)
int getVarArgsGPRIndex() const
void setVarArgsFPRSize(unsigned Size)
unsigned getVarArgsGPRSize() const
unsigned getSRetReturnReg() const
void setSRetReturnReg(unsigned Reg)
unsigned getBytesInStackArgArea() const
void setVarArgsFPRIndex(int Index)
void setVarArgsGPRSize(unsigned Size)
void setArgumentStackToRestore(unsigned bytes)
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
static bool hasSVEArgsOrReturn(const MachineFunction *MF)
bool isTargetWindows() const
bool hasFuseLiterals() const
unsigned getPrefLoopLogAlignment() const
const AArch64RegisterInfo * getRegisterInfo() const override
unsigned getPrefFunctionLogAlignment() const
bool isMisaligned128StoreSlow() const
const AArch64InstrInfo * getInstrInfo() const override
unsigned getMaximumJumpTableSize() const
bool isTargetDarwin() const
bool isTargetILP32() const
ARMProcFamilyEnum getProcFamily() const
Returns ARM processor family.
unsigned classifyGlobalFunctionReference(const GlobalValue *GV, const TargetMachine &TM) const
bool isTargetMachO() const
bool supportsAddressTopByteIgnored() const
CPU has TBI (top byte of addresses is ignored during HW address translation) and OS enables it.
bool isTargetAndroid() const
const Triple & getTargetTriple() const
bool isCallingConvWin64(CallingConv::ID CC) const
unsigned getMinSVEVectorSizeInBits() const
unsigned ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const
ClassifyGlobalReference - Find the target operand flags that describe how a global value should be re...
bool isLittleEndian() const
bool hasAggressiveFMA() const
bool isXRegisterReserved(size_t i) const
bool requiresStrictAlign() const
bool isTargetFuchsia() const
bool predictableSelectIsExpensive() const
bool hasCustomCallingConv() const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
unsigned getVaListSizeInBits(const DataLayout &DL) const override
Returns the size of the platform's va_list object.
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
Return the cost of the scaling factor used in the addressing mode represented by AM for this target,...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
bool isIntDivCheap(EVT VT, AttributeList Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts,...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const override
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
bool fallBackToDAGISel(const Instruction &Inst) const override
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
MachineBasicBlock * EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const
Value * getIRStackGuard(IRBuilder<> &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override
Return if the target supports combining a chain like:
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const override
Return true if it is profitable to reduce a load to a smaller type.
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes) const override
LLT returning variant.
bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const override
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool needsFixedCatchObjects() const override
Used for exception handling on Win64.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool hasPairedLoad(EVT LoadedType, Align &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool shouldConsiderGEPOffsetSplit() const override
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
bool enableAggressiveFMAFusion(EVT VT) const override
Enable aggressive FMA fusion on targets that want it.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, unsigned Align=1, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, bool *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override
Return true if SHIFT instructions should be expanded to SHIFT_PARTS instructions, and false if a libr...
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
unsigned logBase2() const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
int64_t getSExtValue() const
Get sign extended value.
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Equivalent to hasAttribute(AttributeList::FunctionIndex, Kind) but may be faster.
LLVM Basic Block Representation.
const BlockAddress * getBlockAddress() const
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
CCValAssign - Represent assignment of one arg/retval to a location.
Value * getArgOperand(unsigned i) const
unsigned getNumArgOperands() const
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
const APInt & getValue() const
Return the constant as an APInt value reference.
uint64_t getZExtValue() const
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
const Function & getFunction() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
bool hasExternalWeakLinkage() const
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
A wrapper class for inspecting calls to intrinsic functions.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
This is an important class for using LLVM in a threaded context.
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Value * getPointerOperand()
This class is used to represent ISD::LOAD nodes.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
static MVT getFloatingPointVT(unsigned BitWidth)
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static mvt_range fixedlen_vector_valuetypes()
static mvt_range fp_scalable_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static mvt_range integer_valuetypes()
static mvt_range integer_fixedlen_vector_valuetypes()
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static mvt_range integer_scalable_vector_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
static mvt_range fp_fixedlen_vector_valuetypes()
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static mvt_range fp_valuetypes()
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setStackID(int ObjectIdx, uint8_t ID)
bool hasMustTailInVarArgFunc() const
Returns true if the function is variadic and contains a musttail call.
void setReturnAddressIsTaken(bool s)
void computeMaxCallFrameSize(const MachineFunction &MF)
Computes the maximum size of a callframe and the AdjustsStack property.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getBasePtr() const
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
unsigned getAlignment() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Class to represent pointers.
Type * getElementType() const
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
static use_iterator use_end()
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
TypeSize getScalarValueSizeInBits() const
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const TargetSubtargetInfo & getSubtarget() const
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static bool isSplatMask(const int *Mask, EVT VT)
StackOffset is a wrapper around scalable and non-scalable offsets and is used in several functions su...
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
std::enable_if_t< std::numeric_limits< T >::is_signed, bool > getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
LLVM_NODISCARD StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
LLVM_NODISCARD size_t size() const
size - Get the string size.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const
Returns the target-specific address of the unsafe stack pointer.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual Value * getIRStackGuard(IRBuilder<> &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a targte-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const
Check whether or not MI needs to be moved close to its uses.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool EnableExtLdPromotion
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
const Triple & getTargetTriple() const
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
unsigned EmitCallSiteInfo
The flag enables call site info production.
unsigned TLSSize
Bit size of immediate TLS offsets (0 == use the default).
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Triple - Helper class for working with autoconf configuration names.
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
uint64_t getKnownMinSize() const
The instances of the Type class are immutable: once they are created, they are never changed.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
static Type * getVoidTy(LLVMContext &C)
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Base class of all SIMD vector types.
Type * getElementType() const
Implementation for an ilist node.
ilist_node_impl()=default
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCode getInvertedCondCode(CondCode Code)
static unsigned getNZCVToSatisfyCondCode(CondCode Code)
Given a condition code, return NZCV flags that would satisfy that condition.
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_G1
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address,...
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_G0
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address,...
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_HI12
MO_HI12 - This flag indicates that a symbol operand represents the bits 13-24 of a 64-bit address,...
@ MO_TLS
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
@ MO_G2
MO_G2 - A symbol operand with this flag (granule 2) represents the bits 32-47 of a 64-bit address,...
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address,...
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
@ GLDFF1S_SXTW_MERGE_ZERO
@ GLDFF1_SCALED_MERGE_ZERO
@ GLD1_SXTW_SCALED_MERGE_ZERO
@ GLDFF1_SXTW_SCALED_MERGE_ZERO
@ GLD1S_UXTW_SCALED_MERGE_ZERO
@ GLDNT1_INDEX_MERGE_ZERO
@ GLDFF1_UXTW_SCALED_MERGE_ZERO
@ GLDFF1S_SCALED_MERGE_ZERO
@ GLDFF1S_UXTW_SCALED_MERGE_ZERO
@ NVCAST
Natural vector cast.
@ GLDFF1S_UXTW_MERGE_ZERO
@ GLDFF1S_SXTW_SCALED_MERGE_ZERO
@ GLD1S_SCALED_MERGE_ZERO
@ GLD1_UXTW_SCALED_MERGE_ZERO
@ GLD1S_SXTW_SCALED_MERGE_ZERO
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
static uint8_t encodeAdvSIMDModImmType2(uint64_t Imm)
static bool isAdvSIMDModImmType9(uint64_t Imm)
static bool isAdvSIMDModImmType4(uint64_t Imm)
static bool isAdvSIMDModImmType5(uint64_t Imm)
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
static uint8_t encodeAdvSIMDModImmType7(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType12(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType9(uint64_t Imm)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static bool isAdvSIMDModImmType7(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType5(uint64_t Imm)
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
static uint8_t encodeAdvSIMDModImmType8(uint64_t Imm)
static bool isAdvSIMDModImmType12(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType11(uint64_t Imm)
static bool isAdvSIMDModImmType11(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType6(uint64_t Imm)
static bool isAdvSIMDModImmType8(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType4(uint64_t Imm)
static bool isAdvSIMDModImmType6(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType1(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType3(uint64_t Imm)
static bool isAdvSIMDModImmType2(uint64_t Imm)
static bool isAdvSIMDModImmType3(uint64_t Imm)
static bool isAdvSIMDModImmType1(uint64_t Imm)
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
const unsigned NeonBitsPerVector
static constexpr unsigned SVEBitsPerBlock
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ AArch64_SVE_VectorCall
Calling convention between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall funtion.
@ Fast
Fast - This calling convention attempts to make calls as fast as possible (e.g.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ C
C - The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ FLT_ROUNDS_
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isOverflowIntrOpRes(SDValue Op)
Returns true if the specified value is the overflow result from one of the overflow intrinsic nodes.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
static const int LAST_INDEXED_MODE
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
class_match< UndefValue > m_Undef()
Match an arbitrary undef constant.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, CastClass_match< OpTy, Instruction::SExt > > m_ZExtOrSExt(const OpTy &Op)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
initializer< Ty > init(const Ty &Val)
CodeModel::Model getCodeModel()
This class represents lattice values for constants.
bool RetCC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
bool CC_AArch64_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool operator==(uint64_t V1, const APInt &V2)
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool RetCC_AArch64_WebKit_JS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool CC_AArch64_DarwinPCS_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
testing::Matcher< const detail::ErrorHolder & > Failed()
bool CC_AArch64_Win64_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isAcquireOrStronger(AtomicOrdering ao)
Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
void shuffle(Iterator first, Iterator last, RNG &&g)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
unsigned M1(unsigned Val)
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool CC_AArch64_Win64_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool CC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isMask_64(uint64_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool CC_AArch64_DarwinPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
@ Invalid
Denotes invalid value.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr unsigned BitWidth
bool isReleaseOrStronger(AtomicOrdering ao)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
gep_type_iterator gep_type_begin(const User *GEP)
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
bool CC_AArch64_DarwinPCS_ILP32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
static const MachineMemOperand::Flags MOStridedAccess
bool CC_AArch64_WebKit_JS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
llvm::SmallVector< int, 16 > createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Helper structure to keep track of a SET_CC lowered into AArch64 code.
Helper structure to keep track of ISD::SET_CC operands.
Helper structure to be able to read SetCC information.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getScalarSizeInBits() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
EVT changeTypeToInteger()
Return the type converted to an equivalently sized integer or vector with integer element type.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
std::string getEVTString() const
This function returns value type as a string, e.g. "i32".
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type, which is chosen by the caller.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool is64BitVector() const
Return true if this is a 64-bit vector type.
Describes a register that needs to be forwarded from the prologue to a musttail call.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing (from the perspective of the caller) return value virtual register.
unsigned getBitWidth() const
Get the bit width of this value.
Structure used to represent a pair of an argument number after call lowering and the register used to transfer that argument.
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR value of the program.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
void setAllowReassociation(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg. If BaseGV is null, there is no BaseGV.
This structure contains all information that is necessary for lowering calls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.
Helper structure to keep track of SetCC information.